-rw-r--r--  api-ref/source/parameters.yaml  66
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json  87
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json  88
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json  6
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-rescue-req.json  5
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-rescue.json  3
-rw-r--r--  doc/api_samples/os-rescue/v2.87/server-unrescue-req.json  3
-rw-r--r--  doc/api_samples/versions/v21-version-get-resp.json  2
-rw-r--r--  doc/api_samples/versions/versions-get-resp.json  2
-rw-r--r--  doc/source/admin/aggregates.rst  2
-rw-r--r--  doc/source/admin/configuration/schedulers.rst  2
-rw-r--r--  doc/source/admin/virtual-gpu.rst  15
-rw-r--r--  doc/source/configuration/index.rst  11
-rw-r--r--  doc/source/configuration/policy-concepts.rst  273
-rw-r--r--  doc/source/contributor/microversions.rst  2
-rw-r--r--  doc/source/reference/glossary.rst  2
-rw-r--r--  doc/source/user/rescue.rst  51
-rw-r--r--  lower-constraints.txt  4
-rw-r--r--  nova/api/openstack/api_version_request.py  4
-rw-r--r--  nova/api/openstack/compute/admin_actions.py  11
-rw-r--r--  nova/api/openstack/compute/aggregates.py  18
-rw-r--r--  nova/api/openstack/compute/flavor_access.py  4
-rw-r--r--  nova/api/openstack/compute/flavors_extraspecs.py  12
-rw-r--r--  nova/api/openstack/compute/keypairs.py  12
-rw-r--r--  nova/api/openstack/compute/quota_classes.py  4
-rw-r--r--  nova/api/openstack/compute/rescue.py  6
-rw-r--r--  nova/api/openstack/compute/rest_api_version_history.rst  8
-rw-r--r--  nova/api/openstack/compute/server_external_events.py  2
-rw-r--r--  nova/api/openstack/compute/server_groups.py  43
-rw-r--r--  nova/api/openstack/compute/servers.py  26
-rw-r--r--  nova/api/openstack/compute/services.py  8
-rw-r--r--  nova/api/openstack/compute/views/servers.py  38
-rw-r--r--  nova/api/validation/extra_specs/resources.py  6
-rw-r--r--  nova/api/validation/extra_specs/traits.py  32
-rw-r--r--  nova/api/validation/extra_specs/validators.py  15
-rw-r--r--  nova/compute/api.py  72
-rw-r--r--  nova/compute/manager.py  8
-rw-r--r--  nova/compute/rpcapi.py  1
-rw-r--r--  nova/conf/workarounds.py  56
-rw-r--r--  nova/exception.py  9
-rw-r--r--  nova/policies/admin_actions.py  6
-rw-r--r--  nova/policies/aggregates.py  18
-rw-r--r--  nova/policies/base.py  13
-rw-r--r--  nova/policies/evacuate.py  4
-rw-r--r--  nova/policies/extended_server_attributes.py  11
-rw-r--r--  nova/policies/flavor_extra_specs.py  63
-rw-r--r--  nova/policies/instance_actions.py  4
-rw-r--r--  nova/policies/keypairs.py  45
-rw-r--r--  nova/policies/quota_class_sets.py  22
-rw-r--r--  nova/policies/quota_sets.py  55
-rw-r--r--  nova/policies/rescue.py  4
-rw-r--r--  nova/policies/server_external_events.py  2
-rw-r--r--  nova/policies/server_groups.py  31
-rw-r--r--  nova/policies/server_topology.py  24
-rw-r--r--  nova/policies/servers.py  377
-rw-r--r--  nova/policies/simple_tenant_usage.py  4
-rw-r--r--  nova/policy.py  11
-rw-r--r--  nova/privsep/qemu.py  11
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-rescue.json.tpl  87
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json.tpl  88
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json.tpl  6
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req.json.tpl  5
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue.json.tpl  3
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-unrescue-req.json.tpl  3
-rw-r--r--  nova/tests/functional/api_sample_tests/test_keypairs.py  6
-rw-r--r--  nova/tests/functional/api_sample_tests/test_rescue.py  6
-rw-r--r--  nova/tests/functional/integrated_helpers.py  72
-rw-r--r--  nova/tests/functional/libvirt/test_vgpu.py  138
-rw-r--r--  nova/tests/functional/test_flavor_extraspecs.py  53
-rw-r--r--  nova/tests/functional/test_policy.py  61
-rw-r--r--  nova/tests/functional/test_server_rescue.py  100
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_aggregates.py  46
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_evacuate.py  13
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py  45
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_keypairs.py  124
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_quota_classes.py  29
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_quotas.py  59
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_rescue.py  42
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_reset_state.py  1
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_topology.py  27
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_serversV21.py  686
-rw-r--r--  nova/tests/unit/api/validation/extra_specs/test_validators.py  3
-rw-r--r--  nova/tests/unit/compute/test_compute.py  38
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py  182
-rw-r--r--  nova/tests/unit/compute/test_shelve.py  8
-rw-r--r--  nova/tests/unit/fake_policy.py  48
-rw-r--r--  nova/tests/unit/network/test_neutron.py  4
-rw-r--r--  nova/tests/unit/policies/base.py  36
-rw-r--r--  nova/tests/unit/policies/test_admin_actions.py  9
-rw-r--r--  nova/tests/unit/policies/test_aggregates.py  31
-rw-r--r--  nova/tests/unit/policies/test_evacuate.py  11
-rw-r--r--  nova/tests/unit/policies/test_flavor_extra_specs.py  414
-rw-r--r--  nova/tests/unit/policies/test_hypervisors.py  2
-rw-r--r--  nova/tests/unit/policies/test_instance_actions.py  14
-rw-r--r--  nova/tests/unit/policies/test_instance_usage_audit_log.py  6
-rw-r--r--  nova/tests/unit/policies/test_keypairs.py  209
-rw-r--r--  nova/tests/unit/policies/test_limits.py  2
-rw-r--r--  nova/tests/unit/policies/test_lock_server.py  8
-rw-r--r--  nova/tests/unit/policies/test_pause_server.py  8
-rw-r--r--  nova/tests/unit/policies/test_quota_class_sets.py  127
-rw-r--r--  nova/tests/unit/policies/test_quota_sets.py  210
-rw-r--r--  nova/tests/unit/policies/test_server_external_events.py  8
-rw-r--r--  nova/tests/unit/policies/test_server_groups.py  189
-rw-r--r--  nova/tests/unit/policies/test_server_topology.py  161
-rw-r--r--  nova/tests/unit/policies/test_servers.py  1468
-rw-r--r--  nova/tests/unit/policies/test_services.py  4
-rw-r--r--  nova/tests/unit/policies/test_simple_tenant_usage.py  81
-rw-r--r--  nova/tests/unit/policies/test_suspend_server.py  4
-rw-r--r--  nova/tests/unit/privsep/test_qemu.py  24
-rw-r--r--  nova/tests/unit/test_policy.py  10
-rw-r--r--  nova/tests/unit/virt/hyperv/test_driver.py  3
-rw-r--r--  nova/tests/unit/virt/libvirt/fake_imagebackend.py  5
-rw-r--r--  nova/tests/unit/virt/libvirt/fakelibvirt.py  3
-rw-r--r--  nova/tests/unit/virt/libvirt/test_blockinfo.py  265
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py  534
-rw-r--r--  nova/tests/unit/virt/libvirt/test_utils.py  280
-rw-r--r--  nova/tests/unit/virt/libvirt/volume/test_net.py  50
-rw-r--r--  nova/tests/unit/virt/test_hardware.py  16
-rw-r--r--  nova/tests/unit/virt/test_images.py  10
-rw-r--r--  nova/tests/unit/virt/xenapi/test_xenapi.py  12
-rw-r--r--  nova/tests/unit/volume/test_cinder.py  97
-rw-r--r--  nova/utils.py  56
-rw-r--r--  nova/virt/driver.py  9
-rw-r--r--  nova/virt/fake.py  8
-rw-r--r--  nova/virt/hardware.py  7
-rw-r--r--  nova/virt/hyperv/driver.py  2
-rw-r--r--  nova/virt/images.py  20
-rw-r--r--  nova/virt/libvirt/blockinfo.py  202
-rw-r--r--  nova/virt/libvirt/driver.py  187
-rw-r--r--  nova/virt/libvirt/vif.py  19
-rw-r--r--  nova/virt/libvirt/volume/net.py  58
-rw-r--r--  nova/virt/powervm/driver.py  1
-rw-r--r--  nova/virt/vmwareapi/driver.py  2
-rw-r--r--  nova/virt/xenapi/driver.py  2
-rw-r--r--  nova/volume/cinder.py  11
-rw-r--r--  releasenotes/notes/bp-policy-defaults-refresh-b8e6e2d6b1a7bc21.yaml  137
-rw-r--r--  releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml  17
-rw-r--r--  releasenotes/notes/stable_rescue_bfv-cd0e9f0f7e9eaa25.yaml  10
-rw-r--r--  releasenotes/notes/workarounds-libvirt-disable-native-luks-a4eccca8019db243.yaml  26
-rw-r--r--  releasenotes/notes/workarounds-libvirt-rbd-host-block-devices-ca5e3c187342ab4d.yaml  23
-rw-r--r--  requirements.txt  4
-rw-r--r--  tox.ini  23
142 files changed, 6716 insertions, 2126 deletions
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 263835d848..9ed259923e 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -417,9 +417,9 @@ availability_zone_query_server:
description: |
Filter the server list result by server availability zone.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -580,9 +580,9 @@ config_drive_query_server:
description: |
Filter the server list result by the config drive setting of the server.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -599,9 +599,9 @@ created_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -891,9 +891,9 @@ key_name_query_server:
description: |
Filter the server list result by keypair name.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -946,9 +946,9 @@ launched_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1126,17 +1126,17 @@ power_state_query_server:
6: CRASHED
7: SUSPENDED
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
progress_query_server:
description: |
Filter the server list result by the progress of the server.
The value could be from 0 to 100 as integer.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: integer
@@ -1378,9 +1378,9 @@ task_state_query_server:
description: |
Filter the server list result by task state.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
tenant_id_query:
description: |
Specify the project ID (tenant ID) to show the rate and absolute limits.
@@ -1400,9 +1400,9 @@ terminated_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1448,9 +1448,9 @@ user_id_query_server:
description: |
Filter the list of servers by the given user ID.
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1479,9 +1479,9 @@ vm_state_query_server:
- ``STOPPED``
- ``SUSPENDED``
- This parameter is restricted to administrators until microversion 2.82.
- If non-admin users specify this parameter before microversion 2.83, it
- is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
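
As an illustration of the filter behavior documented above, here is a minimal sketch of a non-admin server-list request (the endpoint and token are placeholders, not part of this change):

    import requests

    NOVA = "http://openstack.example.com/v2.1"  # assumed endpoint
    HEADERS = {
        "X-Auth-Token": "<non-admin token>",    # placeholder token
        # Opt in to microversion 2.83 so the filter below is honored;
        # on earlier microversions it is silently ignored for non-admins.
        "X-OpenStack-Nova-API-Version": "2.83",
    }

    # Filter the server list by availability zone, as documented above.
    resp = requests.get(NOVA + "/servers",
                        params={"availability_zone": "us-west"},
                        headers=HEADERS)
    resp.raise_for_status()
    print([s["name"] for s in resp.json()["servers"]])
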
diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json
new file mode 100644
index 0000000000..3500b3ebd8
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json
@@ -0,0 +1,87 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-d0bls59j",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 4,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "rescued",
+ "OS-SRV-USG:launched_at": "2020-02-07T17:39:49.259481",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2020-02-07T17:39:48Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "RESCUE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2020-02-07T17:39:49Z",
+ "user_id": "fake"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json
new file mode 100644
index 0000000000..3388fb55a5
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json
@@ -0,0 +1,88 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-g20x6pwt",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2020-02-07T17:39:55.632592",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2020-02-07T17:39:54Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2020-02-07T17:39:56Z",
+ "user_id": "fake"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json
new file mode 100644
index 0000000000..1cfab52872
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json
@@ -0,0 +1,6 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass",
+ "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json
new file mode 100644
index 0000000000..3796600282
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json
@@ -0,0 +1,5 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue.json b/doc/api_samples/os-rescue/v2.87/server-rescue.json
new file mode 100644
index 0000000000..6cd942395f
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue.json
@@ -0,0 +1,3 @@
+{
+ "adminPass": "MySecretPass"
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json
new file mode 100644
index 0000000000..cafc9b13a8
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json
@@ -0,0 +1,3 @@
+{
+ "unrescue": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 7e05eed56d..abfbcf4f35 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.86",
+ "version": "2.87",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 3555b6b720..0e870df3ae 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.86",
+ "version": "2.87",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/source/admin/aggregates.rst b/doc/source/admin/aggregates.rst
index 5b271a69c6..bb60a96180 100644
--- a/doc/source/admin/aggregates.rst
+++ b/doc/source/admin/aggregates.rst
@@ -25,7 +25,7 @@ per-aggregate basis by setting the desired ``xxx_weight_multiplier`` aggregate
metadata.
Administrators are able to optionally expose a host aggregate as an
-:term:`availability zone`. Availability zones are different from host
+:term:`Availability Zone`. Availability zones are different from host
aggregates in that they are explicitly exposed to the user, and hosts can only
be in a single availability zone. Administrators can configure a default
availability zone where instances will be scheduled when the user fails to
diff --git a/doc/source/admin/configuration/schedulers.rst b/doc/source/admin/configuration/schedulers.rst
index 219870f933..1e832ccfee 100644
--- a/doc/source/admin/configuration/schedulers.rst
+++ b/doc/source/admin/configuration/schedulers.rst
@@ -24,7 +24,7 @@ By default, the scheduler ``driver`` is configured as a filter scheduler, as
described in the next section. In the default configuration, this scheduler
considers hosts that meet all the following criteria:
-* Are in the requested :term:`availability zone` (``AvailabilityZoneFilter``).
+* Are in the requested :term:`Availability Zone` (``AvailabilityZoneFilter``).
* Can service the request (``ComputeFilter``).
diff --git a/doc/source/admin/virtual-gpu.rst b/doc/source/admin/virtual-gpu.rst
index b4de23da8a..549967a8ee 100644
--- a/doc/source/admin/virtual-gpu.rst
+++ b/doc/source/admin/virtual-gpu.rst
@@ -363,17 +363,27 @@ For libvirt:
vGPU resources). The proposed workaround is to rebuild the instance after
   resizing it. The rebuild operation allocates vGPUs to the instance.
+ .. versionchanged:: 21.0.0
+
+ This has been resolved in the Ussuri release. See `bug 1778563`_.
+
* Cold migrating an instance to another host will have the same problem as
resize. If you want to migrate an instance, make sure to rebuild it after the
migration.
+ .. versionchanged:: 21.0.0
+
+ This has been resolved in the Ussuri release. See `bug 1778563`_.
+
* Rescue images do not use vGPUs. An instance being rescued does not keep its
vGPUs during rescue. During that time, another instance can receive those
vGPUs. This is a known issue. The recommended workaround is to rebuild an
instance immediately after rescue. However, rebuilding the rescued instance
only helps if there are other free vGPUs on the host.
- .. note:: This has been resolved in the Rocky release [#]_.
+ .. versionchanged:: 18.0.0
+
+ This has been resolved in the Rocky release. See `bug 1762688`_.
For XenServer:
@@ -397,7 +407,8 @@ For XenServer:
* Multiple GPU types per compute is not supported by the XenServer driver.
-.. [#] https://bugs.launchpad.net/nova/+bug/1762688
+.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
+.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
.. Links
.. _Intel GVT-g: https://01.org/igvt-g
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
index 29d95e9154..de3bdba1dd 100644
--- a/doc/source/configuration/index.rst
+++ b/doc/source/configuration/index.rst
@@ -48,6 +48,17 @@ Policy
Nova, like most OpenStack projects, uses a policy language to restrict
permissions on REST API actions.
+* :doc:`Policy Concepts <policy-concepts>`: Starting in the Ussuri
+  release, Nova API policy defines new default roles with system-scope
+  capabilities. These changes improve the security and manageability of the
+  Nova API, as they allow richer handling of access with system- and
+  project-level tokens and read and write roles.
+
+.. toctree::
+ :hidden:
+
+ policy-concepts
+
* :doc:`Policy Reference <policy>`: A complete reference of all
policy points in nova and what they impact.
diff --git a/doc/source/configuration/policy-concepts.rst b/doc/source/configuration/policy-concepts.rst
new file mode 100644
index 0000000000..b9ca84115b
--- /dev/null
+++ b/doc/source/configuration/policy-concepts.rst
@@ -0,0 +1,273 @@
+Understanding Nova Policies
+===========================
+
+Nova supports a rich policy system that has evolved significantly over its
+lifetime. Initially, this took the form of a large, mostly hand-written
+``policy.json`` file but, starting in the Newton (14.0.0) release, policy
+defaults have been defined in the codebase, requiring the ``policy.json``
+file only to override these defaults.
+
+In the Ussuri (21.0.0) release, further work was undertaken to address some
+issues that had been identified:
+
+#. No global vs project admin. The ``admin_only`` role is used for the global
+ admin that is able to make almost any change to Nova, and see all details
+   of the Nova system. The rule passes for any user with an admin role; it
+   doesn't matter which project is used.
+
+#. No read-only roles. Since several APIs tend to share a single policy rule
+ for read and write actions, they did not provide the granularity necessary
+ for read-only access roles.
+
+#. The ``admin_or_owner`` role did not work as expected. For most APIs with
+   ``admin_or_owner``, the project ownership check happened in a component
+   separate from the policy layer in Nova, one that did not honor changes to
+   policy. As a result, policy could not override the hard-coded in-project
+   checks.
+
+Keystone comes with ``admin``, ``member`` and ``reader`` roles by default.
+Please refer to :keystone-doc:`this document </admin/service-api-protection.html>`
+for more information about these new defaults. In addition, keystone supports
+a new "system scope" concept that makes it easier to protect deployment level
+resources from project or system level resources. Please refer to
+:keystone-doc:`this document </admin/tokens-overview.html#authorization-scopes>`
+and `system scope specification <https://specs.openstack.org/openstack/keystone-specs/specs/keystone/queens/system-scope.html>`_ to understand the scope concept.
+
+In the Nova 21.0.0 (Ussuri) release, Nova policies implemented
+the scope concept and default roles provided by keystone (admin, member,
+and reader). Using common roles from keystone reduces the likelihood of
+similar, but different, roles implemented across projects or deployments
+(e.g., a role called ``observer`` versus ``reader`` versus ``auditor``).
+The new defaults make it easier to understand who can do what across
+projects, reduce divergence, and increase interoperability.
+
+The sections below explain how these new defaults in Nova can solve the
+first two issues mentioned above and extend more functionality to end users
+in a safe and secure way.
+
+More information is provided in the `nova specification <https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html>`_.
+
+Scope
+-----
+
+OpenStack Keystone supports different scopes in tokens.
+These are described :keystone-doc:`here </admin/tokens-overview.html#authorization-scopes>`.
+Token scopes represent the layer of authorization. Policy ``scope_types``
+represent the layer of authorization required to access an API.
+
+.. note::
+
+ The ``scope_type`` of each policy is hardcoded and is not
+ overridable via the policy file.
+
+Nova policies have implemented the scope concept by defining the ``scope_type``
+in policies. To find each policy's ``scope_type``, please refer to the
+:doc:`Policy Reference </configuration/policy>` and look for ``Scope Types`` or
+``Intended scope(s)`` in the :doc:`Policy Sample File </configuration/sample-policy>`,
+as shown in the examples below.
+
+.. rubric:: ``system`` scope
+
+Policies with a ``scope_type`` of ``system`` mean that a user with a
+``system-scoped`` token has permission to access the resource. This can be
+seen as a global role. All system-level operations' policies
+default to a ``scope_type`` of ``['system']``.
+
+For example, consider the ``GET /os-hypervisors`` API.
+
+.. code::
+
+ # List all hypervisors.
+ # GET /os-hypervisors
+ # Intended scope(s): system
+ #"os_compute_api:os-hypervisors:list": "rule:system_reader_api"
+
+.. rubric:: ``project`` scope
+
+Policies with a ``scope_type`` of ``project`` mean that a user with a
+``project-scoped`` token has permission to access the resource. Project-level
+only operations' policies default to a ``scope_type`` of ``['project']``.
+
+For example, consider the ``POST /os-server-groups`` API.
+
+.. code::
+
+ # Create a new server group
+ # POST /os-server-groups
+ # Intended scope(s): project
+ #"os_compute_api:os-server-groups:create": "rule:project_member_api"
+
+.. rubric:: ``system and project`` scope
+
+Policies with a ``scope_type`` of ``system and project`` mean that a user
+with a ``system-scoped`` or ``project-scoped`` token has permission to access
+the resource. All system- and project-level operations' policies default to a
+``scope_type`` of ``['system', 'project']``.
+
+For example, consider the ``POST /servers/{server_id}/action (os-migrateLive)``
+API.
+
+.. code::
+
+ # Live migrate a server to a new host without a reboot
+ # POST /servers/{server_id}/action (os-migrateLive)
+ # Intended scope(s): system, project
+ #"os_compute_api:os-migrate-server:migrate_live": "rule:system_admin_api"
+
+These scope types provide a way to differentiate between system-level and
+project-level access. You can control which information is visible to users
+of each scope; for example, you can ensure that no project-level role is able
+to retrieve hypervisor information.
+
+Policy scope checking is disabled by default to allow operators to migrate
+from the old policy enforcement system in a graceful way. It can be enabled
+by setting the :oslo.config:option:`oslo_policy.enforce_scope` option to
+``True``.
+
+.. code::
+
+    [oslo_policy]
+    enforce_scope=True
+
+
+Roles
+-----
+
+Refer to :keystone-doc:`this document </admin/service-api-protection.html>`
+to learn about all of the default roles available from Keystone.
+
+Along with the ``scope_type`` feature, Nova policy defines new
+defaults for each policy.
+
+.. rubric:: ``reader``
+
+This provides read-only access to the resources within the ``system`` or
+``project``. Nova policies default to the following rules:
+
+.. code::
+
+ system_reader_api
+ Default
+ role:reader and system_scope:all
+
+ system_or_project_reader
+ Default
+ (rule:system_reader_api) or (role:reader and project_id:%(project_id)s)
+
+.. rubric:: ``member``
+
+This role is used to perform project-level write operations, in combination
+with the system admin. Nova policies default to the following rules:
+
+.. code::
+
+ project_member_api
+ Default
+ role:member and project_id:%(project_id)s
+
+ system_admin_or_owner
+ Default
+ (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+
+.. rubric:: ``admin``
+
+This role is used to perform admin-level write operations at the system as
+well as the project level. Nova policies default to the following rules:
+
+.. code::
+
+ system_admin_api
+ Default
+ role:admin and system_scope:all
+
+ project_admin_api
+ Default
+ role:admin and project_id:%(project_id)s
+
+ system_admin_or_owner
+ Default
+ (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+
+With these new defaults, you can solve the problems of:
+
+#. Providing read-only access to a user. Policies are made more granular
+   and default to reader rules, for example when you need to let someone
+   audit your deployment for security purposes.
+
+#. Customizing the policy in a better way. For example, you will be able to
+   grant a project-level user the ability to perform live migration for their
+   own server, or any other project's server, with their token.
+
+
+Backward Compatibility
+----------------------
+
+Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by
+supporting the old defaults and disabling the ``scope_type`` feature by default.
+This means the old defaults and deployments that use them will keep working
+as-is. However, we encourage every deployment to switch to the new policies.
+``scope_type`` will be enabled by default and the old defaults will be removed
+starting in the 23.0.0 (W) release.
+
+To implement the new default reader roles, some policies needed to become
+granular. They have been renamed, with the old names still supported for
+backwards compatibility.
+
+Migration Plan
+--------------
+
+To have a graceful migration, Nova provides two flags to switch to the new
+policy completely. You do not need to modify the policy file to adopt the
+new policy defaults.
+
+Here is a step-by-step guide for migration:
+
+#. Create scoped token:
+
+   You need to create new tokens with the appropriate scope, as described below:
+
+ - :keystone-doc:`Create System Scoped Token </admin/tokens-overview.html#operation_create_system_token>`.
+ - :keystone-doc:`Create Project Scoped Token </admin/tokens-overview.html#operation_create_project_scoped_token>`.
+
+#. Create the new default roles in keystone if they do not already exist:
+
+   If you do not have the new defaults in Keystone, you can create them by
+   re-running :keystone-doc:`Keystone Bootstrap </admin/bootstrap.html>`.
+   Keystone added this support in the 14.0.0 (Rocky) release.
+
+#. Enable Scope Checks
+
+   The :oslo.config:option:`oslo_policy.enforce_scope` flag enables the
+ ``scope_type`` features. The scope of the token used in the request is
+ always compared to the ``scope_type`` of the policy. If the scopes do not
+ match, one of two things can happen. If :oslo.config:option:`oslo_policy.enforce_scope`
+ is True, the request will be rejected. If :oslo.config:option:`oslo_policy.enforce_scope`
+   is False, a warning will be logged, but the request will be accepted
+ (assuming the rest of the policy passes). The default value of this flag
+ is False.
+
+ .. note:: Before you enable this flag, you need to audit your users and make
+ sure everyone who needs system-level access has a system role
+ assignment in keystone.
+
+#. Enable new defaults
+
+ The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches
+ the policy to new defaults-only. This flag controls whether or not to use
+ old deprecated defaults when evaluating policies. If True, the old
+   deprecated defaults are not evaluated. This means that if an existing
+   token is allowed under the old defaults but disallowed under the new
+   defaults, it will be rejected. The default value of this flag is False.
+
+ .. note:: Before you enable this flag, you need to educate users about the
+ different roles they need to use to continue using Nova APIs.
+
+
+#. Check for deprecated policies
+
+   A few policies were made more granular to implement the reader roles, and
+   new policy names are available to use. If renamed policies are overridden
+   in the policy file under their old names, a warning will be logged. Please
+   migrate those overrides to the new policy names.
+
+We expect all deployments to migrate to the new policies by the 23.0.0
+release so that support for the old policies can be removed.
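
A minimal sketch of adopting the new defaults, assuming the usual file locations (the flag names come from the migration steps above; the override shown is illustrative only, reusing the rule strings from the samples):

    # /etc/nova/nova.conf (assumed path)
    [oslo_policy]
    enforce_scope = True
    enforce_new_defaults = True

    # /etc/nova/policy.yaml (assumed path): an optional override that also
    # grants project readers access, using the new granular rule names.
    "os_compute_api:os-hypervisors:list": "rule:system_reader_api or (role:reader and project_id:%(project_id)s)"
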
diff --git a/doc/source/contributor/microversions.rst b/doc/source/contributor/microversions.rst
index 0922721524..e06a75d7d3 100644
--- a/doc/source/contributor/microversions.rst
+++ b/doc/source/contributor/microversions.rst
@@ -383,7 +383,7 @@ necessary to add changes to other places which describe your change:
located under `api-ref/source/`.
* If the microversion changes servers related APIs, update the
- `api-guide/source/server_concepts.rst` accordingly.
+ ``api-guide/source/server_concepts.rst`` accordingly.
.. _API Reference: https://docs.openstack.org/api-ref/compute/
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
index b45ec11ccd..304467e908 100644
--- a/doc/source/reference/glossary.rst
+++ b/doc/source/reference/glossary.rst
@@ -33,7 +33,7 @@ Glossary
Host Aggregate
Host aggregates can be regarded as a mechanism to further partition an
- :term:`availability zone`; while availability zones are visible to
+ :term:`Availability Zone`; while availability zones are visible to
users, host aggregates are only visible to administrators. Host
aggregates provide a mechanism to allow administrators to assign
key-value pairs to groups of machines. Each node can have multiple
diff --git a/doc/source/user/rescue.rst b/doc/source/user/rescue.rst
index 3ac940bea1..8a50fe1114 100644
--- a/doc/source/user/rescue.rst
+++ b/doc/source/user/rescue.rst
@@ -2,9 +2,51 @@
Rescue an instance
==================
-Rescue mode provides a mechanism for access, even if an image renders
-the instance inaccessible. By default, it starts an instance from the
-initial image attaching the current boot disk as a secondary one.
+Instance rescue provides a mechanism for access, even if an image renders the
+instance inaccessible. Two rescue modes are currently provided.
+
+Instance rescue
+---------------
+
+By default, the instance is booted from the provided rescue image, or from a
+fresh copy of the original instance image if a rescue image is not provided.
+The root disk and an optional regenerated config drive are also attached to
+the instance for data recovery.
+
+.. note::
+
+ Rescuing a volume-backed instance is not supported with this mode.
+
+Stable device instance rescue
+-----------------------------
+
+As of 21.0.0 (Ussuri), an additional stable device rescue mode is available.
+This mode supports the rescue of volume-backed instances.
+
+This mode keeps all devices both local and remote attached in their original
+order to the instance during the rescue while booting from the provided rescue
+image. This mode is enabled and controlled by the presence of
+``hw_rescue_device`` or ``hw_rescue_bus`` image properties on the provided
+rescue image.
+
+As their names suggest, these properties control the rescue device type
+(``cdrom``, ``disk`` or ``floppy``) and bus type (``scsi``, ``virtio``,
+``ide``, or ``usb``) used when attaching the rescue image to the instance.
+
+Support for each combination of the ``hw_rescue_device`` and ``hw_rescue_bus``
+image properties is dependent on the underlying hypervisor and platform being
+used. For example, the ``IDE`` bus is not available on POWER KVM-based compute
+hosts.
+
+.. note::
+
+ This mode is only supported when using the Libvirt virt driver.
+
+   This mode is not supported when using the LXC or Xen hypervisors, as
+   selected by the :oslo.config:option:`libvirt.virt_type` option on the
+   compute hosts.
+
+Usage
+-----
.. note::
@@ -13,9 +55,6 @@ initial image attaching the current boot disk as a secondary one.
loss of the original instance state and makes it impossible to
unrescue the instance.
- As of the 20.0.0 (Train) release rescuing a volume-backed server
- is not supported.
-
To perform an instance rescue, use the :command:`openstack server rescue`
command:
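
A minimal sketch of preparing a rescue image for stable device rescue via the Glance v2 image API (the endpoint, token, and image UUID are placeholders; the property values come from the lists above):

    import requests

    GLANCE = "http://openstack.example.com/image"   # assumed endpoint
    IMAGE = "70a599e0-31e7-49b7-b260-868f441e862b"  # rescue image UUID

    # Setting hw_rescue_device/hw_rescue_bus on the image enables the
    # stable device rescue mode described above.
    resp = requests.patch(
        GLANCE + "/v2/images/" + IMAGE,
        headers={
            "X-Auth-Token": "<token>",
            "Content-Type": "application/openstack-images-v2.1-json-patch",
        },
        json=[
            {"op": "add", "path": "/hw_rescue_device", "value": "disk"},
            {"op": "add", "path": "/hw_rescue_bus", "value": "virtio"},
        ])
    resp.raise_for_status()
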
diff --git a/lower-constraints.txt b/lower-constraints.txt
index c8beee8dc3..15a539ec5f 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -63,7 +63,7 @@ netifaces==0.10.4
networkx==1.11
numpy==1.14.2
openstacksdk==0.35.0
-os-brick==2.6.2
+os-brick==3.0.1
os-client-config==1.29.0
os-resource-classes==0.4.0
os-service-types==1.7.0
@@ -81,7 +81,7 @@ oslo.i18n==3.15.3
oslo.log==3.36.0
oslo.messaging==10.3.0
oslo.middleware==3.31.0
-oslo.policy==2.3.0
+oslo.policy==3.1.0
oslo.privsep==1.33.2
oslo.reports==1.18.0
oslo.rootwrap==5.8.0
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index ac187e6d5f..037b01124b 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -234,6 +234,8 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 2.86 - Add support for validation of known extra specs to the
``POST /flavors/{flavor_id}/os-extra_specs`` and
``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs.
+ * 2.87 - Adds support for rescuing boot from volume instances when the
+ compute host reports the COMPUTE_BFV_RESCUE capability trait.
"""
# The minimum and maximum versions of the API supported
@@ -242,7 +244,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.86'
+_MAX_API_VERSION = '2.87'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/admin_actions.py b/nova/api/openstack/compute/admin_actions.py
index c1208df499..6de6956bcf 100644
--- a/nova/api/openstack/compute/admin_actions.py
+++ b/nova/api/openstack/compute/admin_actions.py
@@ -40,8 +40,9 @@ class AdminActionsController(wsgi.Controller):
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
- context.can(aa_policies.POLICY_ROOT % 'reset_network', target={})
instance = common.get_instance(self.compute_api, context, id)
+ context.can(aa_policies.POLICY_ROOT % 'reset_network',
+ target={'project_id': instance.project_id})
try:
self.compute_api.reset_network(context, instance)
except exception.InstanceIsLocked as e:
@@ -53,8 +54,9 @@ class AdminActionsController(wsgi.Controller):
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
- context.can(aa_policies.POLICY_ROOT % 'inject_network_info', target={})
instance = common.get_instance(self.compute_api, context, id)
+ context.can(aa_policies.POLICY_ROOT % 'inject_network_info',
+ target={'project_id': instance.project_id})
try:
self.compute_api.inject_network_info(context, instance)
except exception.InstanceIsLocked as e:
@@ -67,12 +69,13 @@ class AdminActionsController(wsgi.Controller):
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
- context.can(aa_policies.POLICY_ROOT % 'reset_state', target={})
+ instance = common.get_instance(self.compute_api, context, id)
+ context.can(aa_policies.POLICY_ROOT % 'reset_state',
+ target={'project_id': instance.project_id})
# Identify the desired state from the body
state = state_map[body["os-resetState"]["state"]]
- instance = common.get_instance(self.compute_api, context, id)
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
diff --git a/nova/api/openstack/compute/aggregates.py b/nova/api/openstack/compute/aggregates.py
index cf36a1fda0..5391cc93a8 100644
--- a/nova/api/openstack/compute/aggregates.py
+++ b/nova/api/openstack/compute/aggregates.py
@@ -52,7 +52,7 @@ class AggregateController(wsgi.Controller):
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'index')
+ context.can(aggr_policies.POLICY_ROOT % 'index', target={})
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': [self._marshall_aggregate(req, a)['aggregate']
for a in aggregates]}
@@ -67,7 +67,7 @@ class AggregateController(wsgi.Controller):
optional availability zone.
"""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'create')
+ context.can(aggr_policies.POLICY_ROOT % 'create', target={})
host_aggregate = body["aggregate"]
name = common.normalize_name(host_aggregate["name"])
avail_zone = host_aggregate.get("availability_zone")
@@ -97,7 +97,7 @@ class AggregateController(wsgi.Controller):
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'show')
+ context.can(aggr_policies.POLICY_ROOT % 'show', target={})
try:
utils.validate_integer(id, 'id')
@@ -116,7 +116,7 @@ class AggregateController(wsgi.Controller):
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'update')
+ context.can(aggr_policies.POLICY_ROOT % 'update', target={})
updates = body["aggregate"]
if 'name' in updates:
updates['name'] = common.normalize_name(updates['name'])
@@ -144,7 +144,7 @@ class AggregateController(wsgi.Controller):
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'delete')
+ context.can(aggr_policies.POLICY_ROOT % 'delete', target={})
try:
utils.validate_integer(id, 'id')
@@ -169,7 +169,7 @@ class AggregateController(wsgi.Controller):
host = body['add_host']['host']
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'add_host')
+ context.can(aggr_policies.POLICY_ROOT % 'add_host', target={})
try:
utils.validate_integer(id, 'id')
@@ -198,7 +198,7 @@ class AggregateController(wsgi.Controller):
host = body['remove_host']['host']
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'remove_host')
+ context.can(aggr_policies.POLICY_ROOT % 'remove_host', target={})
try:
utils.validate_integer(id, 'id')
@@ -230,7 +230,7 @@ class AggregateController(wsgi.Controller):
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
- context.can(aggr_policies.POLICY_ROOT % 'set_metadata')
+ context.can(aggr_policies.POLICY_ROOT % 'set_metadata', target={})
try:
utils.validate_integer(id, 'id')
@@ -278,7 +278,7 @@ class AggregateController(wsgi.Controller):
def images(self, req, id, body):
"""Allows image cache management requests."""
context = _get_context(req)
- context.can(aggr_policies.NEW_POLICY_ROOT % 'images')
+ context.can(aggr_policies.NEW_POLICY_ROOT % 'images', target={})
try:
utils.validate_integer(id, 'id')
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index f431436000..e17e6f0ddc 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -63,7 +63,7 @@ class FlavorActionController(wsgi.Controller):
@validation.schema(flavor_access.add_tenant_access)
def _add_tenant_access(self, req, id, body):
context = req.environ['nova.context']
- context.can(fa_policies.POLICY_ROOT % "add_tenant_access")
+ context.can(fa_policies.POLICY_ROOT % "add_tenant_access", target={})
vals = body['addTenantAccess']
tenant = vals['tenant']
@@ -89,7 +89,7 @@ class FlavorActionController(wsgi.Controller):
def _remove_tenant_access(self, req, id, body):
context = req.environ['nova.context']
context.can(
- fa_policies.POLICY_ROOT % "remove_tenant_access")
+ fa_policies.POLICY_ROOT % "remove_tenant_access", target={})
vals = body['removeTenantAccess']
tenant = vals['tenant']
diff --git a/nova/api/openstack/compute/flavors_extraspecs.py b/nova/api/openstack/compute/flavors_extraspecs.py
index 0bfe07abe4..4f385d1287 100644
--- a/nova/api/openstack/compute/flavors_extraspecs.py
+++ b/nova/api/openstack/compute/flavors_extraspecs.py
@@ -60,7 +60,8 @@ class FlavorExtraSpecsController(wsgi.Controller):
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
- context.can(fes_policies.POLICY_ROOT % 'index')
+ context.can(fes_policies.POLICY_ROOT % 'index',
+ target={'project_id': context.project_id})
return self._get_extra_specs(context, flavor_id)
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
@@ -70,7 +71,7 @@ class FlavorExtraSpecsController(wsgi.Controller):
@validation.schema(flavors_extraspecs.create)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
- context.can(fes_policies.POLICY_ROOT % 'create')
+ context.can(fes_policies.POLICY_ROOT % 'create', target={})
specs = body['extra_specs']
self._check_extra_specs_value(req, specs)
@@ -88,7 +89,7 @@ class FlavorExtraSpecsController(wsgi.Controller):
@validation.schema(flavors_extraspecs.update)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
- context.can(fes_policies.POLICY_ROOT % 'update')
+ context.can(fes_policies.POLICY_ROOT % 'update', target={})
self._check_extra_specs_value(req, body)
if id not in body:
@@ -108,7 +109,8 @@ class FlavorExtraSpecsController(wsgi.Controller):
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
- context.can(fes_policies.POLICY_ROOT % 'show')
+ context.can(fes_policies.POLICY_ROOT % 'show',
+ target={'project_id': context.project_id})
flavor = common.get_flavor(context, flavor_id)
try:
return {id: flavor.extra_specs[id]}
@@ -125,7 +127,7 @@ class FlavorExtraSpecsController(wsgi.Controller):
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
- context.can(fes_policies.POLICY_ROOT % 'delete')
+ context.can(fes_policies.POLICY_ROOT % 'delete', target={})
flavor = common.get_flavor(context, flavor_id)
try:
del flavor.extra_specs[id]
diff --git a/nova/api/openstack/compute/keypairs.py b/nova/api/openstack/compute/keypairs.py
index d234275c7a..502932cf9a 100644
--- a/nova/api/openstack/compute/keypairs.py
+++ b/nova/api/openstack/compute/keypairs.py
@@ -106,8 +106,7 @@ class KeypairController(wsgi.Controller):
key_type_value = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH)
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'create',
- target={'user_id': user_id,
- 'project_id': context.project_id})
+ target={'user_id': user_id})
return_priv_key = False
try:
@@ -162,8 +161,7 @@ class KeypairController(wsgi.Controller):
# handle optional user-id for admin only
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'delete',
- target={'user_id': user_id,
- 'project_id': context.project_id})
+ target={'user_id': user_id})
try:
self.api.delete_key_pair(context, user_id, id)
except exception.KeypairNotFound as exc:
@@ -200,8 +198,7 @@ class KeypairController(wsgi.Controller):
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'show',
- target={'user_id': user_id,
- 'project_id': context.project_id})
+ target={'user_id': user_id})
try:
keypair = self.api.get_key_pair(context, user_id, id)
@@ -242,8 +239,7 @@ class KeypairController(wsgi.Controller):
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'index',
- target={'user_id': user_id,
- 'project_id': context.project_id})
+ target={'user_id': user_id})
if api_version_request.is_supported(req, min_version='2.35'):
limit, marker = common.get_limit_and_marker(req)
diff --git a/nova/api/openstack/compute/quota_classes.py b/nova/api/openstack/compute/quota_classes.py
index 6ad7f43af7..0077ff9158 100644
--- a/nova/api/openstack/compute/quota_classes.py
+++ b/nova/api/openstack/compute/quota_classes.py
@@ -93,7 +93,7 @@ class QuotaClassSetsController(wsgi.Controller):
def _show(self, req, id, filtered_quotas=None,
exclude_server_groups=False):
context = req.environ['nova.context']
- context.can(qcs_policies.POLICY_ROOT % 'show', {'quota_class': id})
+ context.can(qcs_policies.POLICY_ROOT % 'show', target={})
values = QUOTAS.get_class_quotas(context, id)
return self._format_quota_set(id, values, filtered_quotas,
exclude_server_groups)
@@ -119,7 +119,7 @@ class QuotaClassSetsController(wsgi.Controller):
def _update(self, req, id, body, filtered_quotas=None,
exclude_server_groups=False):
context = req.environ['nova.context']
- context.can(qcs_policies.POLICY_ROOT % 'update', {'quota_class': id})
+ context.can(qcs_policies.POLICY_ROOT % 'update', target={})
try:
utils.check_string_length(id, 'quota_class_name',
min_length=1, max_length=255)
diff --git a/nova/api/openstack/compute/rescue.py b/nova/api/openstack/compute/rescue.py
index 6afb62c008..ab2da84d63 100644
--- a/nova/api/openstack/compute/rescue.py
+++ b/nova/api/openstack/compute/rescue.py
@@ -16,6 +16,7 @@
from webob import exc
+from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import rescue
from nova.api.openstack import wsgi
@@ -56,11 +57,12 @@ class RescueController(wsgi.Controller):
rescue_image_ref = None
if body['rescue']:
rescue_image_ref = body['rescue'].get('rescue_image_ref')
-
+ allow_bfv_rescue = api_version_request.is_supported(req, '2.87')
try:
self.compute_api.rescue(context, instance,
rescue_password=password,
- rescue_image_ref=rescue_image_ref)
+ rescue_image_ref=rescue_image_ref,
+ allow_bfv_rescue=allow_bfv_rescue)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index 5168890b23..1ad0430318 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1130,7 +1130,13 @@ for the following APIs:
* ``POST /flavors/{flavor_id}/os-extra_specs``
* ``PUT /flavors/{flavor_id}/os-extra_specs/{id}``
-Validation is only used for recognized extra spec namespaces, namely:
+Validation is only used for recognized extra spec namespaces, currently:
``accel``, ``aggregate_instance_extra_specs``, ``capabilities``, ``hw``,
``hw_rng``, ``hw_video``, ``os``, ``pci_passthrough``, ``powervm``, ``quota``,
``resources``, ``trait``, and ``vmware``.
+
+2.87 (Maximum in Ussuri)
+------------------------
+
+Adds support for rescuing boot from volume instances when the compute host
+reports the ``COMPUTE_BFV_RESCUE`` capability trait.
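
A minimal sketch of exercising the new microversion (the endpoint, token, and server UUID are placeholders; the request body matches the API samples added in this change):

    import requests

    NOVA = "http://openstack.example.com/v2.1"       # assumed endpoint
    SERVER = "5a0ffa96-ae59-4f82-b7a6-e0c9007cd576"  # volume-backed server

    # With microversion 2.87, rescue is permitted for boot-from-volume
    # servers when the host reports the COMPUTE_BFV_RESCUE trait.
    resp = requests.post(
        NOVA + "/servers/" + SERVER + "/action",
        headers={
            "X-Auth-Token": "<token>",
            "X-OpenStack-Nova-API-Version": "2.87",
        },
        json={"rescue": {"adminPass": "MySecretPass"}})
    resp.raise_for_status()
    print(resp.json())  # {"adminPass": "MySecretPass"}
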
diff --git a/nova/api/openstack/compute/server_external_events.py b/nova/api/openstack/compute/server_external_events.py
index e14b23af2e..162bd93697 100644
--- a/nova/api/openstack/compute/server_external_events.py
+++ b/nova/api/openstack/compute/server_external_events.py
@@ -73,7 +73,7 @@ class ServerExternalEventsController(wsgi.Controller):
def create(self, req, body):
"""Creates a new instance event."""
context = req.environ['nova.context']
- context.can(see_policies.POLICY_ROOT % 'create')
+ context.can(see_policies.POLICY_ROOT % 'create', target={})
response_events = []
accepted_events = []
diff --git a/nova/api/openstack/compute/server_groups.py b/nova/api/openstack/compute/server_groups.py
index 059103017e..5571cce678 100644
--- a/nova/api/openstack/compute/server_groups.py
+++ b/nova/api/openstack/compute/server_groups.py
@@ -42,12 +42,6 @@ CONF = nova.conf.CONF
GROUP_POLICY_OBJ_MICROVERSION = "2.64"
-def _authorize_context(req, action):
- context = req.environ['nova.context']
- context.can(sg_policies.POLICY_ROOT % action)
- return context
-
-
def _get_not_deleted(context, uuids):
mappings = objects.InstanceMappingList.get_by_instance_uuids(
context, uuids)
@@ -126,22 +120,26 @@ class ServerGroupController(wsgi.Controller):
@wsgi.expected_errors(404)
def show(self, req, id):
"""Return data about the given server group."""
- context = _authorize_context(req, 'show')
+ context = req.environ['nova.context']
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
+ context.can(sg_policies.POLICY_ROOT % 'show',
+ target={'project_id': sg.project_id})
return {'server_group': self._format_server_group(context, sg, req)}
@wsgi.response(204)
@wsgi.expected_errors(404)
def delete(self, req, id):
"""Delete a server group."""
- context = _authorize_context(req, 'delete')
+ context = req.environ['nova.context']
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
+ context.can(sg_policies.POLICY_ROOT % 'delete',
+ target={'project_id': sg.project_id})
try:
sg.destroy()
except nova.exception.InstanceGroupNotFound as e:
@@ -152,9 +150,24 @@ class ServerGroupController(wsgi.Controller):
@validation.query_schema(schema.server_groups_query_param, '2.0', '2.74')
def index(self, req):
"""Returns a list of server groups."""
- context = _authorize_context(req, 'index')
+ context = req.environ['nova.context']
project_id = context.project_id
+        # NOTE(gmann): Use the context's project_id as the target here
+        # so that, once the default target is removed from the policy
+        # class, the check does not fail for users operating on their
+        # own server groups.
+ context.can(sg_policies.POLICY_ROOT % 'index',
+ target={'project_id': project_id})
if 'all_projects' in req.GET and context.is_admin:
+            # TODO(gmann): Remove the is_admin check from the condition
+            # above so that the policy below can raise an error when the
+            # request is not allowed. Under the existing behaviour, if
+            # non-admin users request all projects' server groups they do
+            # not get an error; they simply get their own server groups.
+            # Once we switch completely to the new policy defaults we can
+            # remove the check above. Until then, keep the old behaviour.
+ context.can(sg_policies.POLICY_ROOT % 'index:all_projects',
+ target={})
sgs = objects.InstanceGroupList.get_all(context)
else:
sgs = objects.InstanceGroupList.get_by_project_id(
@@ -171,11 +184,13 @@ class ServerGroupController(wsgi.Controller):
@validation.schema(schema.create_v264, GROUP_POLICY_OBJ_MICROVERSION)
def create(self, req, body):
"""Creates a new server group."""
- context = _authorize_context(req, 'create')
-
+ context = req.environ['nova.context']
+ project_id = context.project_id
+ context.can(sg_policies.POLICY_ROOT % 'create',
+ target={'project_id': project_id})
try:
objects.Quotas.check_deltas(context, {'server_groups': 1},
- context.project_id, context.user_id)
+ project_id, context.user_id)
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
@@ -201,7 +216,7 @@ class ServerGroupController(wsgi.Controller):
sg = objects.InstanceGroup(context, policy=policies[0])
try:
sg.name = vals.get('name')
- sg.project_id = context.project_id
+ sg.project_id = project_id
sg.user_id = context.user_id
sg.create()
except ValueError as e:
@@ -214,7 +229,7 @@ class ServerGroupController(wsgi.Controller):
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context, {'server_groups': 0},
- context.project_id,
+ project_id,
context.user_id)
except nova.exception.OverQuota:
sg.destroy()
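The removal of the _authorize_context() helper in favour of explicit target
dicts is what lets check strings such as project_id:%(project_id)s be
evaluated against the owner of the resource rather than an implicit default.
A minimal oslo.policy sketch of that mechanism (credentials and project IDs
are illustrative):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault(
        'os_compute_api:os-server-groups:show',
        'role:reader and project_id:%(project_id)s'))

    creds = {'roles': ['reader'], 'project_id': 'p1'}
    # Allowed: the target's project matches the caller's project.
    assert enforcer.enforce('os_compute_api:os-server-groups:show',
                            {'project_id': 'p1'}, creds)
    # Denied: a server group owned by another project.
    assert not enforcer.enforce('os_compute_api:os-server-groups:show',
                                {'project_id': 'p2'}, creds)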
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index e2f59d3dd1..d70bd8d45f 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -448,7 +448,6 @@ class ServersController(wsgi.Controller):
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
- context.can(server_policies.SERVERS % 'show')
cell_down_support = api_version_request.is_supported(
req, min_version=PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION)
show_server_groups = api_version_request.is_supported(
@@ -457,6 +456,9 @@ class ServersController(wsgi.Controller):
instance = self._get_server(
context, req, id, is_detail=True,
cell_down_support=cell_down_support)
+ context.can(server_policies.SERVERS % 'show',
+ target={'project_id': instance.project_id})
+
return self._view_builder.show(
req, instance, cell_down_support=cell_down_support,
show_server_groups=show_server_groups)
@@ -869,8 +871,9 @@ class ServersController(wsgi.Controller):
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
- context.can(server_policies.SERVERS % 'confirm_resize')
instance = self._get_server(context, req, id)
+ context.can(server_policies.SERVERS % 'confirm_resize',
+ target={'project_id': instance.project_id})
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
@@ -887,8 +890,9 @@ class ServersController(wsgi.Controller):
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
- context.can(server_policies.SERVERS % 'revert_resize')
instance = self._get_server(context, req, id)
+ context.can(server_policies.SERVERS % 'revert_resize',
+ target={'project_id': instance.project_id})
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
@@ -911,8 +915,9 @@ class ServersController(wsgi.Controller):
reboot_type = body['reboot']['type'].upper()
context = req.environ['nova.context']
- context.can(server_policies.SERVERS % 'reboot')
instance = self._get_server(context, req, id)
+ context.can(server_policies.SERVERS % 'reboot',
+ target={'project_id': instance.project_id})
try:
self.compute_api.reboot(context, instance, reboot_type)
@@ -1193,7 +1198,10 @@ class ServersController(wsgi.Controller):
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
- context.can(server_policies.SERVERS % 'create_image')
+ instance = self._get_server(context, req, id)
+ target = {'project_id': instance.project_id}
+ context.can(server_policies.SERVERS % 'create_image',
+ target=target)
entity = body["createImage"]
image_name = common.normalize_name(entity["name"])
@@ -1205,8 +1213,6 @@ class ServersController(wsgi.Controller):
api_version_request.MAX_IMAGE_META_PROXY_API_VERSION):
common.check_img_metadata_properties_quota(context, metadata)
- instance = self._get_server(context, req, id)
-
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -1214,7 +1220,7 @@ class ServersController(wsgi.Controller):
if compute_utils.is_volume_backed_instance(context, instance,
bdms):
context.can(server_policies.SERVERS %
- 'create_image:allow_volume_backed')
+ 'create_image:allow_volume_backed', target=target)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
@@ -1298,7 +1304,9 @@ class ServersController(wsgi.Controller):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
- context.can(server_policies.SERVERS % 'start', instance)
+ context.can(server_policies.SERVERS % 'start',
+ target={'user_id': instance.user_id,
+ 'project_id': instance.project_id})
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py
index 3c8783a445..04fe299077 100644
--- a/nova/api/openstack/compute/services.py
+++ b/nova/api/openstack/compute/services.py
@@ -231,7 +231,7 @@ class ServiceController(wsgi.Controller):
def delete(self, req, id):
"""Deletes the specified service."""
context = req.environ['nova.context']
- context.can(services_policies.BASE_POLICY_NAME % 'delete')
+ context.can(services_policies.BASE_POLICY_NAME % 'delete', target={})
if api_version_request.is_supported(
req, min_version=UUID_FOR_ID_MIN_VERSION):
@@ -347,7 +347,7 @@ class ServiceController(wsgi.Controller):
name
"""
context = req.environ['nova.context']
- context.can(services_policies.BASE_POLICY_NAME % 'list')
+ context.can(services_policies.BASE_POLICY_NAME % 'list', target={})
if api_version_request.is_supported(req, min_version='2.11'):
_services = self._get_services_list(req, ['forced_down'])
else:
@@ -368,7 +368,7 @@ class ServiceController(wsgi.Controller):
PUT /os-services/disable.
"""
context = req.environ['nova.context']
- context.can(services_policies.BASE_POLICY_NAME % 'update')
+ context.can(services_policies.BASE_POLICY_NAME % 'update', target={})
if api_version_request.is_supported(req, min_version='2.11'):
actions = self.actions.copy()
actions["force-down"] = self._forced_down
@@ -395,7 +395,7 @@ class ServiceController(wsgi.Controller):
# Validate the request context against the policy.
context = req.environ['nova.context']
- context.can(services_policies.BASE_POLICY_NAME % 'update')
+ context.can(services_policies.BASE_POLICY_NAME % 'update', target={})
# Get the service by uuid.
try:
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index 344c680605..6c9f1006e6 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -184,30 +184,38 @@ class ViewBuilder(common.ViewBuilder):
return ret
@staticmethod
- def _get_host_status_unknown_only(context):
- # We will use the unknown_only variable to tell us what host status we
- # can show, if any:
- # * unknown_only = False means we can show any host status.
- # * unknown_only = True means that we can only show host
- # status: UNKNOWN. If the host status is anything other than
- # UNKNOWN, we will not include the host_status field in the
- # response.
- # * unknown_only = None means we cannot show host status at all and
- # we will not include the host_status field in the response.
+ def _get_host_status_unknown_only(context, instance=None):
+ """We will use the unknown_only variable to tell us what host status we
+ can show, if any:
+ * unknown_only = False means we can show any host status.
+ * unknown_only = True means that we can only show host
+ status: UNKNOWN. If the host status is anything other than
+ UNKNOWN, we will not include the host_status field in the
+ response.
+ * unknown_only = None means we cannot show host status at all and
+ we will not include the host_status field in the response.
+ """
unknown_only = None
# Check show:host_status policy first because if it passes, we know we
# can show any host status and need not check the more restrictive
# show:host_status:unknown-only policy.
+        # Keep the target as None for now (the policy layer will then
+        # default the target to context.project_id); this is the 'detail'
+        # API case, whose policy defaults to system and project readers.
+ target = None
+ if instance is not None:
+ target = {'project_id': instance.project_id}
if context.can(
servers_policies.SERVERS % 'show:host_status',
- fatal=False):
+ fatal=False, target=target):
unknown_only = False
# If we are not allowed to show any/all host status, check if we can at
# least show only the host status: UNKNOWN.
elif context.can(
servers_policies.SERVERS %
'show:host_status:unknown-only',
- fatal=False):
+ fatal=False,
+ target=target):
unknown_only = True
return unknown_only
@@ -303,7 +311,8 @@ class ViewBuilder(common.ViewBuilder):
if show_extended_attr is None:
show_extended_attr = context.can(
- esa_policies.BASE_POLICY_NAME, fatal=False)
+ esa_policies.BASE_POLICY_NAME, fatal=False,
+ target={'project_id': instance.project_id})
if show_extended_attr:
properties = ['host', 'name', 'node']
if api_version_request.is_supported(request, min_version='2.3'):
@@ -357,7 +366,8 @@ class ViewBuilder(common.ViewBuilder):
add_delete_on_termination)
if (api_version_request.is_supported(request, min_version='2.16')):
if show_host_status is None:
- unknown_only = self._get_host_status_unknown_only(context)
+ unknown_only = self._get_host_status_unknown_only(
+ context, instance)
# If we're not allowed by policy to show host status at all,
# don't bother requesting instance host status from the compute
# API.
diff --git a/nova/api/validation/extra_specs/resources.py b/nova/api/validation/extra_specs/resources.py
index 2444d39fc1..54f59fb516 100644
--- a/nova/api/validation/extra_specs/resources.py
+++ b/nova/api/validation/extra_specs/resources.py
@@ -32,7 +32,7 @@ for resource_class in os_resource_classes.STANDARDS:
parameters=[
{
'name': 'group',
- 'pattern': r'(_[a-zA-z0-9_]*|\d+)?',
+ 'pattern': r'([a-zA-Z0-9_-]{1,64})?',
},
],
)
@@ -50,11 +50,11 @@ EXTRA_SPEC_VALIDATORS.append(
parameters=[
{
'name': 'group',
- 'pattern': r'(_[a-zA-z0-9_]*|\d+)?',
+ 'pattern': r'([a-zA-Z0-9_-]{1,64})?',
},
{
'name': 'resource',
- 'pattern': r'.+',
+ 'pattern': r'[A-Z0-9_]+',
},
],
)
diff --git a/nova/api/validation/extra_specs/traits.py b/nova/api/validation/extra_specs/traits.py
index 60ae165955..194350c2fa 100644
--- a/nova/api/validation/extra_specs/traits.py
+++ b/nova/api/validation/extra_specs/traits.py
@@ -36,16 +36,38 @@ for trait in os_traits.get_traits():
parameters=[
{
'name': 'group',
- 'pattern': r'(_[a-zA-z0-9_]*|\d+)?',
- },
- {
- 'name': 'trait',
- 'pattern': r'[a-zA-Z0-9_]+',
+ 'pattern': r'([a-zA-Z0-9_-]{1,64})?',
},
],
)
)
+EXTRA_SPEC_VALIDATORS.append(
+ base.ExtraSpecValidator(
+ name='trait{group}:CUSTOM_{trait}',
+ description=(
+ 'Require or forbid trait CUSTOM_{trait}.'
+ ),
+ value={
+ 'type': str,
+ 'enum': [
+ 'required',
+ 'forbidden',
+ ],
+ },
+ parameters=[
+ {
+ 'name': 'group',
+ 'pattern': r'([a-zA-Z0-9_-]{1,64})?',
+ },
+ {
+ 'name': 'trait',
+ 'pattern': r'[A-Z0-9_]+',
+ },
+ ],
+ )
+)
+
def register():
return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/extra_specs/validators.py b/nova/api/validation/extra_specs/validators.py
index a1e3d9d50e..2163892d71 100644
--- a/nova/api/validation/extra_specs/validators.py
+++ b/nova/api/validation/extra_specs/validators.py
@@ -47,8 +47,15 @@ def validate(name: str, value: str):
validator.validate(name, value)
return
- namespace = name.split(':', 1)[0].split('{')[0] if ':' in name else None
- if not namespace or namespace not in NAMESPACES: # unregistered namespace
+ # check if there's a namespace; if not, we've done all we can do
+ if ':' not in name: # no namespace
+ return
+
+ # if there is, check if it's one we recognize
+ for namespace in NAMESPACES:
+ if re.fullmatch(namespace, name.split(':', 1)[0]):
+ break
+ else:
return
raise exception.ValidationError(
@@ -72,8 +79,8 @@ def load_validators():
# TODO(stephenfin): Make 'register' return a dict rather than a list?
for validator in ext.plugin.register():
VALIDATORS[validator.name] = validator
- if ':' in validator.name:
- NAMESPACES.add(validator.name.split(':', 1)[0].split('{')[0])
+ if ':' in validator.name_regex:
+ NAMESPACES.add(validator.name_regex.split(':', 1)[0])
load_validators()
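With the namespaces now recorded as name regexes, an unknown key inside a
recognized namespace is rejected while keys in unrecognized namespaces still
pass through untouched. Roughly (a sketch against the in-tree validator API;
the exact keys are illustrative):

    from nova.api.validation.extra_specs import validators

    validators.validate('resources:VCPU', '2')           # known key, valid
    validators.validate('resources_db:CUSTOM_SSD', '1')  # named group, valid
    validators.validate('foo:bar', 'baz')                # unknown namespace, skipped
    validators.validate('resources:vcpu', '2')           # raises ValidationError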
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 5c543887d1..7f68070885 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1382,53 +1382,6 @@ class API(base.Base):
return certs_to_return
- def _get_bdm_image_metadata(self, context, block_device_mapping,
- legacy_bdm=True):
- """If we are booting from a volume, we need to get the
- volume details from Cinder and make sure we pass the
- metadata back accordingly.
- """
- if not block_device_mapping:
- return {}
-
- for bdm in block_device_mapping:
- if (legacy_bdm and
- block_device.get_device_letter(
- bdm.get('device_name', '')) != 'a'):
- continue
- elif not legacy_bdm and bdm.get('boot_index') != 0:
- continue
-
- volume_id = bdm.get('volume_id')
- snapshot_id = bdm.get('snapshot_id')
- if snapshot_id:
- # NOTE(alaski): A volume snapshot inherits metadata from the
- # originating volume, but the API does not expose metadata
- # on the snapshot itself. So we query the volume for it below.
- snapshot = self.volume_api.get_snapshot(context, snapshot_id)
- volume_id = snapshot['volume_id']
-
- if bdm.get('image_id'):
- try:
- image_id = bdm['image_id']
- image_meta = self.image_api.get(context, image_id)
- return image_meta
- except Exception:
- raise exception.InvalidBDMImage(id=image_id)
- elif volume_id:
- try:
- volume = self.volume_api.get(context, volume_id)
- except exception.CinderConnectionFailed:
- raise
- except Exception:
- raise exception.InvalidBDMVolume(id=volume_id)
-
- if not volume.get('bootable', True):
- raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
-
- return utils.get_image_metadata_from_volume(volume)
- return {}
-
@staticmethod
def _get_requested_instance_group(context, filter_properties):
if (not filter_properties or
@@ -1481,8 +1434,9 @@ class API(base.Base):
"when booting from volume")
raise exception.CertificateValidationFailed(message=msg)
image_id = None
- boot_meta = self._get_bdm_image_metadata(
- context, block_device_mapping, legacy_bdm)
+ boot_meta = utils.get_bdm_image_metadata(
+ context, self.image_api, self.volume_api, block_device_mapping,
+ legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
@@ -2162,7 +2116,7 @@ class API(base.Base):
try:
self._update_queued_for_deletion(context, instance, True)
except exception.InstanceMappingNotFound:
- LOG.info("Instance Mapping does not exist while attempting"
+ LOG.info("Instance Mapping does not exist while attempting "
"local delete cleanup.",
instance=instance)
@@ -4247,7 +4201,8 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
- rescue_image_ref=None, clean_shutdown=True):
+ rescue_image_ref=None, clean_shutdown=True,
+ allow_bfv_rescue=False):
"""Rescue the given instance."""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -4256,7 +4211,20 @@ class API(base.Base):
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
- if compute_utils.is_volume_backed_instance(context, instance, bdms):
+
+ volume_backed = compute_utils.is_volume_backed_instance(
+ context, instance, bdms)
+
+ if volume_backed and allow_bfv_rescue:
+ cn = objects.ComputeNode.get_by_host_and_nodename(
+ context, instance.host, instance.node)
+ traits = self.placementclient.get_provider_traits(
+ context, cn.uuid).traits
+ if os_traits.COMPUTE_RESCUE_BFV not in traits:
+ reason = _("Host unable to rescue a volume-backed instance")
+ raise exception.InstanceNotRescuable(instance_id=instance.uuid,
+ reason=reason)
+ elif volume_backed:
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 812b8024bc..75ef023ce6 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -5779,11 +5779,19 @@ class ComputeManager(manager.Manager):
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
+ # NOTE(sbauza): During a migration, the original allocation is against
+ # the migration UUID while the target allocation (for the destination
+ # node) is related to the instance UUID, so here we need to pass the
+ # new ones.
+ allocations = self.reportclient.get_allocs_for_consumer(
+ context, instance.uuid)['allocations']
+
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image_meta, resize_instance,
+ allocations,
block_device_info, power_on)
except Exception:
# Note that we do not rollback port bindings to the source host
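For context, get_allocs_for_consumer() returns the placement allocations
keyed by resource provider UUID, so the value handed to finish_migration()
looks roughly like this (shape per the placement API; values illustrative):

    {
        '<destination-node-rp-uuid>': {
            'resources': {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20},
        },
    }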
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 4d7ee71a6a..74d2f53f3c 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -393,6 +393,7 @@ class ComputeAPI(object):
'rocky': '5.0',
'stein': '5.1',
'train': '5.3',
+ 'ussuri': '5.11',
}
@property
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index d7cad708a7..b7e30bb258 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -271,6 +271,62 @@ Related options:
* ``compute_driver`` (libvirt)
* ``[libvirt]/images_type`` (rbd)
"""),
+ # TODO(lyarwood): Remove this workaround in the W release once all
+ # supported distros have rebased to a version of libgcrypt that does not
+ # have the performance issues listed below.
+ cfg.BoolOpt(
+ 'disable_native_luksv1',
+ default=False,
+ help="""
+When attaching encrypted LUKSv1 Cinder volumes to instances the Libvirt driver
+configures the encrypted disks to be natively decrypted by QEMU.
+
+A performance issue has been discovered in the libgcrypt library used by QEMU
+that severely limits the I/O performance in this scenario.
+
+For more information please refer to the following bug report:
+
+RFE: hardware accelerated AES-XTS mode
+https://bugzilla.redhat.com/show_bug.cgi?id=1762765
+
+Enabling this workaround option will cause Nova to use the legacy dm-crypt
+based os-brick encryptor to decrypt the LUKSv1 volume.
+
+Note that enabling this option while using volumes that do not provide a host
+block device, such as Ceph, will result in a failure to boot from or attach
+the volume to an instance. See the ``[workarounds]/rbd_volume_local_attach``
+option for a way to avoid this for RBD.
+
+Related options:
+
+* ``compute_driver`` (libvirt)
+* ``rbd_block_device`` (workarounds)
+"""),
+ # TODO(lyarwood): Remove this workaround in the W release when the
+ # above disable_native_luksv1 configurable is removed.
+ cfg.BoolOpt('rbd_volume_local_attach',
+ default=False,
+ help="""
+Attach RBD Cinder volumes to the compute as host block devices.
+
+When enabled, this option instructs os-brick to connect RBD volumes locally on
+the compute host as block devices instead of natively through QEMU.
+
+This workaround does not currently support extending attached volumes.
+
+This can be used with the ``disable_native_luksv1`` workaround configuration
+option to avoid the performance issues recently discovered in the libgcrypt
+library.
+
+This workaround is temporary and will be removed during the W release once
+all impacted distributions have been able to update their versions of the
+libgcrypt library.
+
+Related options:
+
+* ``compute_driver`` (libvirt)
+* ``disable_native_luksv1`` (workarounds)
+"""),
]
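Operators opting into both workarounds would set something like the
following in nova.conf (a hypothetical excerpt; both options default to
False):

    [workarounds]
    disable_native_luksv1 = True
    rbd_volume_local_attach = True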
diff --git a/nova/exception.py b/nova/exception.py
index af9d7ec7de..aa1dacd3cb 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1393,6 +1393,15 @@ class UnsupportedHardware(Invalid):
"the '%(virt)s' virt driver")
+class UnsupportedRescueBus(Invalid):
+ msg_fmt = _("Requested rescue bus '%(bus)s' is not supported by "
+ "the '%(virt)s' virt driver")
+
+
+class UnsupportedRescueDevice(Invalid):
+ msg_fmt = _("Requested rescue device '%(device)s' is not supported")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
diff --git a/nova/policies/admin_actions.py b/nova/policies/admin_actions.py
index 77e180c07b..bab6bd6452 100644
--- a/nova/policies/admin_actions.py
+++ b/nova/policies/admin_actions.py
@@ -32,7 +32,7 @@ admin_actions_policies = [
'path': '/servers/{server_id}/action (os-resetState)'
}
],
- scope_types=['system']),
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'inject_network_info',
check_str=base.SYSTEM_ADMIN,
@@ -43,7 +43,7 @@ admin_actions_policies = [
'path': '/servers/{server_id}/action (injectNetworkInfo)'
}
],
- scope_types=['system']),
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'reset_network',
check_str=base.SYSTEM_ADMIN,
@@ -54,7 +54,7 @@ admin_actions_policies = [
'path': '/servers/{server_id}/action (resetNetwork)'
}
],
- scope_types=['system'])
+ scope_types=['system', 'project'])
]
diff --git a/nova/policies/aggregates.py b/nova/policies/aggregates.py
index 5b6f7a51ea..ea629a5db1 100644
--- a/nova/policies/aggregates.py
+++ b/nova/policies/aggregates.py
@@ -25,7 +25,7 @@ NEW_POLICY_ROOT = 'compute:aggregates:%s'
aggregates_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'set_metadata',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Create or replace metadata for an aggregate",
operations=[
{
@@ -36,7 +36,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'add_host',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Add a host to an aggregate",
operations=[
{
@@ -47,7 +47,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Create an aggregate",
operations=[
{
@@ -58,7 +58,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'remove_host',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Remove a host from an aggregate",
operations=[
{
@@ -69,7 +69,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Update name and/or availability zone for an aggregate",
operations=[
{
@@ -80,7 +80,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_READER,
description="List all aggregates",
operations=[
{
@@ -91,7 +91,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Delete an aggregate",
operations=[
{
@@ -102,7 +102,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_READER,
description="Show details for an aggregate",
operations=[
{
@@ -113,7 +113,7 @@ aggregates_policies = [
scope_types=['system']),
policy.DocumentedRuleDefault(
name=NEW_POLICY_ROOT % 'images',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Request image caching for an aggregate",
operations=[
{
diff --git a/nova/policies/base.py b/nova/policies/base.py
index 96b9d8e91a..32b18291e0 100644
--- a/nova/policies/base.py
+++ b/nova/policies/base.py
@@ -47,6 +47,7 @@ in nova 23.0.0 release.
# oslo.policy's RuleDefault objects.
SYSTEM_ADMIN = 'rule:system_admin_api'
SYSTEM_READER = 'rule:system_reader_api'
+PROJECT_ADMIN = 'rule:project_admin_api'
PROJECT_MEMBER = 'rule:project_member_api'
PROJECT_READER = 'rule:project_reader_api'
PROJECT_MEMBER_OR_SYSTEM_ADMIN = 'rule:system_admin_or_owner'
@@ -113,9 +114,19 @@ rules = [
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
policy.RuleDefault(
+ "project_admin_api",
+ "role:admin and project_id:%(project_id)s",
+ "Default rule for Project level admin APIs.",
+ deprecated_rule=DEPRECATED_ADMIN_POLICY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since='21.0.0'),
+ policy.RuleDefault(
"project_member_api",
"role:member and project_id:%(project_id)s",
- "Default rule for Project level non admin APIs."),
+ "Default rule for Project level non admin APIs.",
+ deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since='21.0.0'),
policy.RuleDefault(
"project_reader_api",
"role:reader and project_id:%(project_id)s",
diff --git a/nova/policies/evacuate.py b/nova/policies/evacuate.py
index ba5572b04d..33b86f7a26 100644
--- a/nova/policies/evacuate.py
+++ b/nova/policies/evacuate.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-evacuate'
evacuate_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Evacuate a server from a failed host to a new host",
operations=[
{
@@ -32,7 +32,7 @@ evacuate_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['system', 'project']),
]
diff --git a/nova/policies/extended_server_attributes.py b/nova/policies/extended_server_attributes.py
index 40abacb99a..9a01285980 100644
--- a/nova/policies/extended_server_attributes.py
+++ b/nova/policies/extended_server_attributes.py
@@ -23,9 +23,9 @@ BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes'
extended_server_attributes_policies = [
policy.DocumentedRuleDefault(
- BASE_POLICY_NAME,
- base.RULE_ADMIN_API,
- """Return extended attributes for server.
+ name=BASE_POLICY_NAME,
+ check_str=base.SYSTEM_ADMIN,
+ description="""Return extended attributes for server.
This rule will control the visibility for a set of servers attributes:
@@ -43,7 +43,7 @@ Microvision 2.75 added the above attributes in the ``PUT /servers/{server_id}``
and ``POST /servers/{server_id}/action (rebuild)`` API responses which are
also controlled by this policy rule, like the ``GET /servers*`` APIs.
""",
- [
+ operations=[
{
'method': 'GET',
'path': '/servers/{id}'
@@ -60,7 +60,8 @@ also controlled by this policy rule, like the ``GET /servers*`` APIs.
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
- ]
+ ],
+ scope_types=['system', 'project']
),
]
diff --git a/nova/policies/flavor_extra_specs.py b/nova/policies/flavor_extra_specs.py
index f305e42cbf..9355a9719d 100644
--- a/nova/policies/flavor_extra_specs.py
+++ b/nova/policies/flavor_extra_specs.py
@@ -23,61 +23,65 @@ POLICY_ROOT = 'os_compute_api:os-flavor-extra-specs:%s'
flavor_extra_specs_policies = [
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'show',
- base.RULE_ADMIN_OR_OWNER,
- "Show an extra spec for a flavor",
- [
+ name=POLICY_ROOT % 'show',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="Show an extra spec for a flavor",
+ operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/'
'{flavor_extra_spec_key}',
'method': 'GET'
}
- ]
+ ],
+ scope_types=['system', 'project']
),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'create',
- base.RULE_ADMIN_API,
- "Create extra specs for a flavor",
- [
+ name=POLICY_ROOT % 'create',
+ check_str=base.SYSTEM_ADMIN,
+ description="Create extra specs for a flavor",
+ operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/',
'method': 'POST'
}
- ]
+ ],
+ scope_types=['system']
),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'update',
- base.RULE_ADMIN_API,
- "Update an extra spec for a flavor",
- [
+ name=POLICY_ROOT % 'update',
+ check_str=base.SYSTEM_ADMIN,
+ description="Update an extra spec for a flavor",
+ operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/'
'{flavor_extra_spec_key}',
'method': 'PUT'
}
- ]
+ ],
+ scope_types=['system']
),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'delete',
- base.RULE_ADMIN_API,
- "Delete an extra spec for a flavor",
- [
+ name=POLICY_ROOT % 'delete',
+ check_str=base.SYSTEM_ADMIN,
+ description="Delete an extra spec for a flavor",
+ operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/'
'{flavor_extra_spec_key}',
'method': 'DELETE'
}
- ]
+ ],
+ scope_types=['system']
),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'index',
- base.RULE_ADMIN_OR_OWNER,
- "List extra specs for a flavor. Starting with microversion 2.47, "
- "the flavor used for a server is also returned in the response "
- "when showing server details, updating a server or rebuilding a "
- "server. Starting with microversion 2.61, extra specs may be "
- "returned in responses for the flavor resource.",
- [
+ name=POLICY_ROOT % 'index',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="List extra specs for a flavor. Starting with "
+ "microversion 2.47, the flavor used for a server is also returned "
+ "in the response when showing server details, updating a server or "
+ "rebuilding a server. Starting with microversion 2.61, extra specs "
+ "may be returned in responses for the flavor resource.",
+ operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/',
'method': 'GET'
@@ -116,7 +120,8 @@ flavor_extra_specs_policies = [
'path': '/flavors/{flavor_id}',
'method': 'PUT'
}
- ]
+ ],
+ scope_types=['system', 'project']
),
]
diff --git a/nova/policies/instance_actions.py b/nova/policies/instance_actions.py
index 04dc9753e7..13586a8fbe 100644
--- a/nova/policies/instance_actions.py
+++ b/nova/policies/instance_actions.py
@@ -53,7 +53,7 @@ but in the other hand it might leak information about the deployment
'path': '/servers/{server_id}/os-instance-actions/{request_id}'
}
],
- scope_types=['system']),
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events',
check_str=base.SYSTEM_READER,
@@ -70,7 +70,7 @@ passes, the name of the host.""",
'path': '/servers/{server_id}/os-instance-actions/{request_id}'
}
],
- scope_types=['system']),
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
check_str=base.PROJECT_READER_OR_SYSTEM_READER,
diff --git a/nova/policies/keypairs.py b/nova/policies/keypairs.py
index 5d228edb06..3feaa524bd 100644
--- a/nova/policies/keypairs.py
+++ b/nova/policies/keypairs.py
@@ -15,51 +15,56 @@
from oslo_policy import policy
+from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-keypairs:%s'
keypairs_policies = [
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'index',
- 'rule:admin_api or user_id:%(user_id)s',
- "List all keypairs",
- [
+ name=POLICY_ROOT % 'index',
+ check_str='(' + base.SYSTEM_READER + ') or user_id:%(user_id)s',
+ description="List all keypairs",
+ operations=[
{
'path': '/os-keypairs',
'method': 'GET'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'create',
- 'rule:admin_api or user_id:%(user_id)s',
- "Create a keypair",
- [
+ name=POLICY_ROOT % 'create',
+ check_str='(' + base.SYSTEM_ADMIN + ') or user_id:%(user_id)s',
+ description="Create a keypair",
+ operations=[
{
'path': '/os-keypairs',
'method': 'POST'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'delete',
- 'rule:admin_api or user_id:%(user_id)s',
- "Delete a keypair",
- [
+ name=POLICY_ROOT % 'delete',
+ check_str='(' + base.SYSTEM_ADMIN + ') or user_id:%(user_id)s',
+ description="Delete a keypair",
+ operations=[
{
'path': '/os-keypairs/{keypair_name}',
'method': 'DELETE'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'show',
- 'rule:admin_api or user_id:%(user_id)s',
- "Show details of a keypair",
- [
+ name=POLICY_ROOT % 'show',
+ check_str='(' + base.SYSTEM_READER + ') or user_id:%(user_id)s',
+ description="Show details of a keypair",
+ operations=[
{
'path': '/os-keypairs/{keypair_name}',
'method': 'GET'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
]
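Since base.SYSTEM_READER is the string 'rule:system_reader_api', the
concatenated check strings above expand to, for example:

    check = '(' + base.SYSTEM_READER + ') or user_id:%(user_id)s'
    # -> '(rule:system_reader_api) or user_id:%(user_id)s'

that is, either a system reader or the user owning the keypair may list and
show keypairs.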
diff --git a/nova/policies/quota_class_sets.py b/nova/policies/quota_class_sets.py
index e8691957a7..5a41a79bec 100644
--- a/nova/policies/quota_class_sets.py
+++ b/nova/policies/quota_class_sets.py
@@ -23,25 +23,27 @@ POLICY_ROOT = 'os_compute_api:os-quota-class-sets:%s'
quota_class_sets_policies = [
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'show',
- 'is_admin:True or quota_class:%(quota_class)s',
- "List quotas for specific quota classs",
- [
+ name=POLICY_ROOT % 'show',
+ check_str=base.SYSTEM_READER,
+ description="List quotas for specific quota classs",
+ operations=[
{
'method': 'GET',
'path': '/os-quota-class-sets/{quota_class}'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'update',
- base.RULE_ADMIN_API,
- 'Update quotas for specific quota class',
- [
+ name=POLICY_ROOT % 'update',
+ check_str=base.SYSTEM_ADMIN,
+ description='Update quotas for specific quota class',
+ operations=[
{
'method': 'PUT',
'path': '/os-quota-class-sets/{quota_class}'
}
- ]),
+ ],
+ scope_types=['system']),
]
diff --git a/nova/policies/quota_sets.py b/nova/policies/quota_sets.py
index 5e208b20ce..ac141a2c24 100644
--- a/nova/policies/quota_sets.py
+++ b/nova/policies/quota_sets.py
@@ -23,55 +23,60 @@ POLICY_ROOT = 'os_compute_api:os-quota-sets:%s'
quota_sets_policies = [
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'update',
- base.RULE_ADMIN_API,
- "Update the quotas",
- [
+ name=POLICY_ROOT % 'update',
+ check_str=base.SYSTEM_ADMIN,
+ description="Update the quotas",
+ operations=[
{
'method': 'PUT',
'path': '/os-quota-sets/{tenant_id}'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'defaults',
- base.RULE_ANY,
- "List default quotas",
- [
+ name=POLICY_ROOT % 'defaults',
+ check_str=base.RULE_ANY,
+ description="List default quotas",
+ operations=[
{
'method': 'GET',
'path': '/os-quota-sets/{tenant_id}/defaults'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'show',
- base.RULE_ADMIN_OR_OWNER,
- "Show a quota",
- [
+ name=POLICY_ROOT % 'show',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="Show a quota",
+ operations=[
{
'method': 'GET',
'path': '/os-quota-sets/{tenant_id}'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'delete',
- base.RULE_ADMIN_API,
- "Revert quotas to defaults",
- [
+ name=POLICY_ROOT % 'delete',
+ check_str=base.SYSTEM_ADMIN,
+ description="Revert quotas to defaults",
+ operations=[
{
'method': 'DELETE',
'path': '/os-quota-sets/{tenant_id}'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- POLICY_ROOT % 'detail',
- base.RULE_ADMIN_OR_OWNER,
- "Show the detail of quota",
- [
+ name=POLICY_ROOT % 'detail',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="Show the detail of quota",
+ operations=[
{
'method': 'GET',
'path': '/os-quota-sets/{tenant_id}/detail'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
]
diff --git a/nova/policies/rescue.py b/nova/policies/rescue.py
index 6e8c83807e..874e90884b 100644
--- a/nova/policies/rescue.py
+++ b/nova/policies/rescue.py
@@ -35,7 +35,7 @@ for unrescue and keeping old policy for rescue.
rescue_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Rescue a server",
operations=[
{
@@ -46,7 +46,7 @@ rescue_policies = [
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=UNRESCUE_POLICY_NAME,
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Unrescue a server",
operations=[
{
diff --git a/nova/policies/server_external_events.py b/nova/policies/server_external_events.py
index 83da4f9615..bd9a21aea0 100644
--- a/nova/policies/server_external_events.py
+++ b/nova/policies/server_external_events.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-external-events:%s'
server_external_events_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_ADMIN,
description="Create one or more external events",
operations=[
{
diff --git a/nova/policies/server_groups.py b/nova/policies/server_groups.py
index f678213617..55176b8a6a 100644
--- a/nova/policies/server_groups.py
+++ b/nova/policies/server_groups.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
server_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_MEMBER,
description="Create a new server group",
operations=[
{
@@ -32,11 +32,20 @@ server_groups_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+        # NOTE(gmann): Reason for the 'project'-only scope:
+        # POST /os-server-groups needs a project_id to create the server
+        # group, and system-scoped tokens carry no project_id to create
+        # it for. If we also allowed system-scoped roles, the created
+        # server group would get the system user's project_id rather than
+        # the intended one (nobody can create a server group for another
+        # project because the API does not take a project_id in the
+        # request). So keep this scoped to 'project' only, as those roles
+        # are the only ones that will be creating server groups.
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Delete a server group",
operations=[
{
@@ -48,7 +57,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="List all server groups",
operations=[
{
@@ -59,8 +68,20 @@ server_groups_policies = [
scope_types=['system', 'project']
),
policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index:all_projects',
+ check_str=base.SYSTEM_READER,
+ description="List all server groups for all projects",
+ operations=[
+ {
+ 'path': '/os-server-groups',
+ 'method': 'GET'
+ }
+ ],
+ scope_types=['system']
+ ),
+ policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="Show details of a server group",
operations=[
{
diff --git a/nova/policies/server_topology.py b/nova/policies/server_topology.py
index 8e7152d2ea..4ceee3e418 100644
--- a/nova/policies/server_topology.py
+++ b/nova/policies/server_topology.py
@@ -20,27 +20,29 @@ BASE_POLICY_NAME = 'compute:server:topology:%s'
server_topology_policies = [
policy.DocumentedRuleDefault(
- BASE_POLICY_NAME % 'index',
- base.RULE_ADMIN_OR_OWNER,
- "Show the NUMA topology data for a server",
- [
+ name=BASE_POLICY_NAME % 'index',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="Show the NUMA topology data for a server",
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}/topology'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
# Control host NUMA node and cpu pinning information
- BASE_POLICY_NAME % 'host:index',
- base.RULE_ADMIN_API,
- "Show the NUMA topology data for a server with host NUMA ID and CPU "
- "pinning information",
- [
+ name=BASE_POLICY_NAME % 'host:index',
+ check_str=base.SYSTEM_READER,
+ description="Show the NUMA topology data for a server with host"
+ "NUMA ID and CPU pinning information",
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}/topology'
}
- ]),
+ ],
+ scope_types=['system']),
]
diff --git a/nova/policies/servers.py b/nova/policies/servers.py
index 5354d3bcbb..811030bdb7 100644
--- a/nova/policies/servers.py
+++ b/nova/policies/servers.py
@@ -25,50 +25,55 @@ CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
rules = [
policy.DocumentedRuleDefault(
- SERVERS % 'index',
- RULE_AOO,
- "List all servers",
- [
+ name=SERVERS % 'index',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="List all servers",
+ operations=[
{
'method': 'GET',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'detail',
- RULE_AOO,
- "List all servers with detailed information",
- [
+ name=SERVERS % 'detail',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="List all servers with detailed information",
+ operations=[
{
'method': 'GET',
'path': '/servers/detail'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'index:get_all_tenants',
- base.RULE_ADMIN_API,
- "List all servers for all projects",
- [
+ name=SERVERS % 'index:get_all_tenants',
+ check_str=base.SYSTEM_READER,
+ description="List all servers for all projects",
+ operations=[
{
'method': 'GET',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- SERVERS % 'detail:get_all_tenants',
- base.RULE_ADMIN_API,
- "List all servers with detailed information for all projects",
- [
+ name=SERVERS % 'detail:get_all_tenants',
+ check_str=base.SYSTEM_READER,
+ description="List all servers with detailed information for "
+ " all projects",
+ operations=[
{
'method': 'GET',
'path': '/servers/detail'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- SERVERS % 'allow_all_filters',
- base.RULE_ADMIN_API,
- "Allow all filters when listing servers",
- [
+ name=SERVERS % 'allow_all_filters',
+ check_str=base.SYSTEM_READER,
+ description="Allow all filters when listing servers",
+ operations=[
{
'method': 'GET',
'path': '/servers'
@@ -77,23 +82,25 @@ rules = [
'method': 'GET',
'path': '/servers/detail'
}
- ]),
+ ],
+ scope_types=['system']),
policy.DocumentedRuleDefault(
- SERVERS % 'show',
- RULE_AOO,
- "Show a server",
- [
+ name=SERVERS % 'show',
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ description="Show a server",
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
- SERVERS % 'show:host_status',
- base.RULE_ADMIN_API,
- """
+ name=SERVERS % 'show:host_status',
+ check_str=base.SYSTEM_ADMIN,
+ description="""
Show a server with additional host status information.
This means host_status will be shown irrespective of status value. If showing
@@ -105,7 +112,7 @@ Microvision 2.75 added the ``host_status`` attribute in the
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
- [
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
@@ -122,11 +129,12 @@ API responses which are also controlled by this policy rule, like the
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'show:host_status:unknown-only',
- base.RULE_ADMIN_API,
- """
+ name=SERVERS % 'show:host_status:unknown-only',
+ check_str=base.SYSTEM_ADMIN,
+ description="""
Show a server with additional host status information, only if host status is
UNKNOWN.
@@ -137,7 +145,7 @@ request. An example policy configuration could be where the
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
- [
+ operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
@@ -145,38 +153,59 @@ allow everyone.
{
'method': 'GET',
'path': '/servers/detail'
+ },
+ {
+ 'method': 'PUT',
+ 'path': '/servers/{server_id}'
+ },
+ {
+ 'method': 'POST',
+ 'path': '/servers/{server_id}/action (rebuild)'
}
- ]),
+ ],
+ scope_types=['system', 'project'],),
policy.DocumentedRuleDefault(
- SERVERS % 'create',
- RULE_AOO,
- "Create a server",
- [
+ name=SERVERS % 'create',
+ check_str=base.PROJECT_MEMBER,
+ description="Create a server",
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create:forced_host',
- base.RULE_ADMIN_API,
- """
+ name=SERVERS % 'create:forced_host',
+ # TODO(gmann): We need to make it SYSTEM_ADMIN.
+ # PROJECT_ADMIN is added for now because create server
+ # policy is project scoped and there is no way to
+ # pass the project_id in request body for system scoped
+ # roles so that create server for other project with force host.
+ # To achieve that, we need to update the create server API to
+ # accept the project_id for whom the server needs to be created
+ # and then change the scope of this policy to system-only
+ # Because that is API change it needs to be done with new
+ # microversion.
+ check_str=base.PROJECT_ADMIN,
+ description="""
Create a server on the specified host and/or node.
In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
- [
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- REQUESTED_DESTINATION,
- base.RULE_ADMIN_API,
- """
+ name=REQUESTED_DESTINATION,
+ check_str=base.RULE_ADMIN_API,
+ description="""
Create a server on the requested compute service host and/or
hypervisor_hostname.
@@ -184,46 +213,61 @@ In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
- [
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create:attach_volume',
- RULE_AOO,
- "Create a server with the requested volume attached to it",
- [
+ name=SERVERS % 'create:attach_volume',
+ check_str=base.PROJECT_MEMBER,
+ description="Create a server with the requested volume attached to it",
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create:attach_network',
- RULE_AOO,
- "Create a server with the requested network attached to it",
- [
+ name=SERVERS % 'create:attach_network',
+ check_str=base.PROJECT_MEMBER,
+ description="Create a server with the requested network attached "
+ " to it",
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create:trusted_certs',
- RULE_AOO,
- "Create a server with trusted image certificate IDs",
- [
+ name=SERVERS % 'create:trusted_certs',
+ check_str=base.PROJECT_MEMBER,
+ description="Create a server with trusted image certificate IDs",
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['project']),
policy.DocumentedRuleDefault(
- ZERO_DISK_FLAVOR,
- base.RULE_ADMIN_API,
- """
+ name=ZERO_DISK_FLAVOR,
+ # TODO(gmann): We need to make it SYSTEM_ADMIN.
+ # PROJECT_ADMIN is added for now because create server
+ # policy is project scoped and there is no way to
+ # pass the project_id in request body for system scoped
+ # roles so that create server for other project with zero disk flavor.
+ # To achieve that, we need to update the create server API to
+ # accept the project_id for whom the server needs to be created
+ # and then change the scope of this policy to system-only
+ # Because that is API change it needs to be done with new
+ # microversion.
+ check_str=base.PROJECT_ADMIN,
+ description="""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
@@ -238,17 +282,28 @@ create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
- [
+ operations=[
{
'method': 'POST',
'path': '/servers'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- NETWORK_ATTACH_EXTERNAL,
- 'is_admin:True',
- "Attach an unshared external network to a server",
- [
+ name=NETWORK_ATTACH_EXTERNAL,
+ # TODO(gmann): We need to make it SYSTEM_ADMIN.
+ # PROJECT_ADMIN is added for now because create server
+ # policy is project scoped and there is no way to
+ # pass the project_id in request body for system scoped
+ # roles so that create server for other project or attach the
+ # external network. To achieve that, we need to update the
+ # create server API to accept the project_id for whom the
+ # server needs to be created and then change the scope of this
+ # policy to system-only Because that is API change it needs to
+ # be done with new microversion.
+ check_str=base.PROJECT_ADMIN,
+ description="Attach an unshared external network to a server",
+ operations=[
# Create a server with a requested network or port.
{
'method': 'POST',
@@ -259,150 +314,166 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'method': 'POST',
'path': '/servers/{server_id}/os-interface'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'delete',
- RULE_AOO,
- "Delete a server",
- [
+ name=SERVERS % 'delete',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Delete a server",
+ operations=[
{
'method': 'DELETE',
'path': '/servers/{server_id}'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'update',
- RULE_AOO,
- "Update a server",
- [
+ name=SERVERS % 'update',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Update a server",
+ operations=[
{
'method': 'PUT',
'path': '/servers/{server_id}'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'confirm_resize',
- RULE_AOO,
- "Confirm a server resize",
- [
+ name=SERVERS % 'confirm_resize',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Confirm a server resize",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (confirmResize)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'revert_resize',
- RULE_AOO,
- "Revert a server resize",
- [
+ name=SERVERS % 'revert_resize',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Revert a server resize",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (revertResize)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'reboot',
- RULE_AOO,
- "Reboot a server",
- [
+ name=SERVERS % 'reboot',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Reboot a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (reboot)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'resize',
- RULE_AOO,
- "Resize a server",
- [
+ name=SERVERS % 'resize',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Resize a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- CROSS_CELL_RESIZE,
- base.RULE_NOBODY,
- "Resize a server across cells. By default, this is disabled for all "
- "users and recommended to be tested in a deployment for admin users "
- "before opening it up to non-admin users. Resizing within a cell is "
- "the default preferred behavior even if this is enabled. ",
- [
+ name=CROSS_CELL_RESIZE,
+ check_str=base.RULE_NOBODY,
+ description="Resize a server across cells. By default, this is "
+ "disabled for all users and recommended to be tested in a "
+ "deployment for admin users before opening it up to non-admin users. "
+ "Resizing within a cell is the default preferred behavior even if "
+ "this is enabled. ",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'rebuild',
- RULE_AOO,
- "Rebuild a server",
- [
+ name=SERVERS % 'rebuild',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Rebuild a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'rebuild:trusted_certs',
- RULE_AOO,
- "Rebuild a server with trusted image certificate IDs",
- [
+ name=SERVERS % 'rebuild:trusted_certs',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Rebuild a server with trusted image certificate IDs",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create_image',
- RULE_AOO,
- "Create an image from a server",
- [
+ name=SERVERS % 'create_image',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Create an image from a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'create_image:allow_volume_backed',
- RULE_AOO,
- "Create an image from a volume backed server",
- [
+ name=SERVERS % 'create_image:allow_volume_backed',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Create an image from a volume backed server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'start',
- RULE_AOO,
- "Start a server",
- [
+ name=SERVERS % 'start',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Start a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-start)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'stop',
- RULE_AOO,
- "Stop a server",
- [
+ name=SERVERS % 'stop',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Stop a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-stop)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
- SERVERS % 'trigger_crash_dump',
- RULE_AOO,
- "Trigger crash dump in a server",
- [
+ name=SERVERS % 'trigger_crash_dump',
+ check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ description="Trigger crash dump in a server",
+ operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
- ]),
+ ],
+ scope_types=['system', 'project']),
]
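Deployments that need the previous behaviour for any of these rules can
still override them in policy.yaml; for example, restoring the old
admin-only default for the host status rule (a hypothetical override):

    "os_compute_api:servers:show:host_status": "rule:admin_api"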
diff --git a/nova/policies/simple_tenant_usage.py b/nova/policies/simple_tenant_usage.py
index 22f3ab6f21..85ebffbb30 100644
--- a/nova/policies/simple_tenant_usage.py
+++ b/nova/policies/simple_tenant_usage.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s'
simple_tenant_usage_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="Show usage statistics for a specific tenant",
operations=[
{
@@ -35,7 +35,7 @@ simple_tenant_usage_policies = [
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.RULE_ADMIN_API,
+ check_str=base.SYSTEM_READER,
description="List per tenant usage statistics for all tenants",
operations=[
{
diff --git a/nova/policy.py b/nova/policy.py
index 20aeb1db6a..613df50463 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -49,10 +49,8 @@ def reset():
_ENFORCER = None
-# TODO(gmann): Make suppress_deprecation_warnings default to False, once
-# we find the way to disable warning for default change on oslo side.
def init(policy_file=None, rules=None, default_rule=None, use_conf=True,
- suppress_deprecation_warnings=True):
+ suppress_deprecation_warnings=False):
"""Init an Enforcer class.
:param policy_file: Custom policy file to use, if none is specified,
@@ -75,6 +73,13 @@ def init(policy_file=None, rules=None, default_rule=None, use_conf=True,
rules=rules,
default_rule=default_rule,
use_conf=use_conf)
+ # NOTE(gmann): Explicitly disable the warnings for policies
+ # changing their default check_str. During the policy-defaults-refresh
+ # work, all the policy defaults were changed, and the warning for
+ # each policy started flooding the logs and hitting log size limits
+ # for various tools. Once we move to the new-defaults-only world,
+ # we can enable these warnings again.
+ _ENFORCER.suppress_default_change_warnings = True
if suppress_deprecation_warnings:
_ENFORCER.suppress_deprecation_warnings = True
register_rules(_ENFORCER)
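
The enforcer exposes two separate suppression knobs, which this hunk splits: default-change warnings are now always silenced, while deprecation warnings follow the init() parameter. A short sketch of the same oslo.policy attributes in isolation:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    # Silences the per-rule "default check_str changed" warnings that the
    # hunk above disables globally for nova.
    enforcer.suppress_default_change_warnings = True
    # Deprecation warnings stay independently controllable; nova's init()
    # parameter now defaults to False, i.e. they are emitted.
    enforcer.suppress_deprecation_warnings = False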
diff --git a/nova/privsep/qemu.py b/nova/privsep/qemu.py
index 2b2acfd3e8..529c24faf3 100644
--- a/nova/privsep/qemu.py
+++ b/nova/privsep/qemu.py
@@ -82,17 +82,16 @@ def unprivileged_convert_image(source, dest, in_format, out_format,
@nova.privsep.sys_admin_pctxt.entrypoint
-def privileged_qemu_img_info(path, format=None, output_format=None):
+def privileged_qemu_img_info(path, format=None):
"""Return an oject containing the parsed output from qemu-img info
This is a privileged call to qemu-img info using the sys_admin_pctxt
entrypoint allowing host block devices etc to be accessed.
"""
- return unprivileged_qemu_img_info(
- path, format=format, output_format=output_format)
+ return unprivileged_qemu_img_info(path, format=format)
-def unprivileged_qemu_img_info(path, format=None, output_format=None):
+def unprivileged_qemu_img_info(path, format=None):
"""Return an object containing the parsed output from qemu-img info."""
try:
# The following check is about ploop images that reside within
@@ -103,12 +102,10 @@ def unprivileged_qemu_img_info(path, format=None, output_format=None):
cmd = (
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share',
+ '--force-share', '--output=json',
)
if format is not None:
cmd = cmd + ('-f', format)
- if output_format is not None:
- cmd = cmd + ("--output=%s" % (output_format),)
out, err = processutils.execute(*cmd, prlimit=QEMU_IMG_LIMITS)
except processutils.ProcessExecutionError as exp:
if exp.exit_code == -9:
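
With --output=json now passed unconditionally, callers can parse the result as structured data. A rough sketch of the equivalent call outside privsep; the disk path is a placeholder:

    import json

    from oslo_concurrency import processutils

    cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
           '/var/lib/nova/instances/uuid/disk',  # placeholder path
           '--force-share', '--output=json')
    out, _err = processutils.execute(*cmd)
    info = json.loads(out)
    # Typical keys emitted by qemu-img: 'format', 'virtual-size', ...
    print(info['format'], info['virtual-size'])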
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-rescue.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-rescue.json.tpl
new file mode 100644
index 0000000000..34343fe8c2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-rescue.json.tpl
@@ -0,0 +1,87 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 4,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "rescued",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "RESCUE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json.tpl
new file mode 100644
index 0000000000..02ac1caa9d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json.tpl
@@ -0,0 +1,88 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "progress": 0
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json.tpl
new file mode 100644
index 0000000000..8a4ad0d52a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass",
+ "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req.json.tpl
new file mode 100644
index 0000000000..f946b74f53
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "rescue": {
+ "adminPass": "%(password)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-rescue.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-unrescue-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-unrescue-req.json.tpl
new file mode 100644
index 0000000000..cafc9b13a8
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-rescue/v2.87/server-unrescue-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unrescue": null
+} \ No newline at end of file
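
Taken together, these samples describe the 2.87 rescue flow. A hedged sketch of driving it over the REST API; the endpoint, token and IDs are placeholders:

    import requests

    resp = requests.post(
        'http://compute.example/v2.1/servers/SERVER_ID/action',
        headers={'X-Auth-Token': 'TOKEN',
                 'X-OpenStack-Nova-API-Version': '2.87'},
        json={'rescue': {'adminPass': 'MySecretPass',
                         'rescue_image_ref':
                             '70a599e0-31e7-49b7-b260-868f441e862b'}})
    resp.raise_for_status()
    print(resp.json()['adminPass'])  # matches the server-rescue sample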
diff --git a/nova/tests/functional/api_sample_tests/test_keypairs.py b/nova/tests/functional/api_sample_tests/test_keypairs.py
index c67384cd0c..eab88f61e1 100644
--- a/nova/tests/functional/api_sample_tests/test_keypairs.py
+++ b/nova/tests/functional/api_sample_tests/test_keypairs.py
@@ -230,6 +230,9 @@ class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
user_id="fake")
def test_keypairs_post_for_other_user(self):
+ rules = {'os_compute_api:os-keypairs:create':
+ 'rule:admin_api or user_id:%(user_id)s'}
+ self.policy.set_rules(rules, overwrite=False)
key_name = 'keypair-' + uuids.fake
subs = dict(keypair_name=key_name,
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
@@ -240,6 +243,9 @@ class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
def test_keypairs_list_for_different_users(self):
# get and post for other users is forbidden for non admin
+ rules = {'os_compute_api:os-keypairs:index':
+ 'rule:admin_api or user_id:%(user_id)s'}
+ self.policy.set_rules(rules, overwrite=False)
response = self._do_get('os-keypairs?user_id=fake1')
self.assertEqual(403, response.status_code)
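
The rule string injected above combines an alias with a target-attribute check. A small sketch of how oslo.policy evaluates it, with invented credentials:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.set_rules(policy.Rules.from_dict({
        'admin_api': 'role:admin',
        'os_compute_api:os-keypairs:index':
            'rule:admin_api or user_id:%(user_id)s',
    }))
    # Passes: the credential user_id matches the target user_id.
    assert enforcer.enforce('os_compute_api:os-keypairs:index',
                            {'user_id': 'fake'},
                            {'user_id': 'fake', 'roles': ['member']})
    # Fails: different user and no admin role.
    assert not enforcer.enforce('os_compute_api:os-keypairs:index',
                                {'user_id': 'fake1'},
                                {'user_id': 'fake', 'roles': ['member']})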
diff --git a/nova/tests/functional/api_sample_tests/test_rescue.py b/nova/tests/functional/api_sample_tests/test_rescue.py
index 3c5545c297..06ed1d2c32 100644
--- a/nova/tests/functional/api_sample_tests/test_rescue.py
+++ b/nova/tests/functional/api_sample_tests/test_rescue.py
@@ -92,3 +92,9 @@ class RescueJsonTest(test_servers.ServersSampleBase):
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['cdrive'] = '.*'
self._verify_response('server-get-resp-unrescue', subs, response, 200)
+
+
+class Rescuev287JsonTest(RescueJsonTest):
+ """2.87 adds support for rescuing boot from volume instances"""
+ microversion = '2.87'
+ scenarios = [('v2_87', {'api_major_version': 'v2.1'})]
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 041247b80d..4026e7375b 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -370,6 +370,43 @@ class InstanceHelperMixin(object):
self.api.delete_server(server['id'])
self._wait_until_deleted(server)
+ def _confirm_resize(self, server):
+ self.api.post_server_action(server['id'], {'confirmResize': None})
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_instance_action_event(
+ server, instance_actions.CONFIRM_RESIZE,
+ 'compute_confirm_resize', 'success')
+ return server
+
+ def _revert_resize(self, server):
+ # NOTE(sbauza): This method requires the caller to setup a fake
+ # notifier by stubbing it.
+ self.api.post_server_action(server['id'], {'revertResize': None})
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_migration_status(server, ['reverted'])
+ # Note that the migration status is changed to "reverted" in the
+ # dest host revert_resize method but the allocations are cleaned up
+ # in the source host finish_revert_resize method so we need to wait
+ # for the finish_revert_resize method to complete.
+ fake_notifier.wait_for_versioned_notifications(
+ 'instance.resize_revert.end')
+ return server
+
+ def _migrate_or_resize(self, server, request):
+ if not ('resize' in request or 'migrate' in request):
+ raise Exception('_migrate_or_resize only supports resize or '
+ 'migrate requests.')
+ self.api.post_server_action(server['id'], request)
+ self._wait_for_state_change(server, 'VERIFY_RESIZE')
+
+ def _resize_server(self, server, new_flavor):
+ resize_req = {
+ 'resize': {
+ 'flavorRef': new_flavor
+ }
+ }
+ self._migrate_or_resize(server, resize_req)
+
class _IntegratedTestBase(test.TestCase, InstanceHelperMixin):
REQUIRES_LOCKING = True
@@ -393,6 +430,9 @@ class _IntegratedTestBase(test.TestCase, InstanceHelperMixin):
self.placement_api = placement.api
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
+ fake_notifier.stub_notifier(self)
+ self.addCleanup(fake_notifier.reset)
+
self._setup_services()
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
@@ -527,6 +567,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
os_traits.COMPUTE_IMAGE_TYPE_ISO,
os_traits.COMPUTE_IMAGE_TYPE_QCOW2,
os_traits.COMPUTE_IMAGE_TYPE_RAW,
+ os_traits.COMPUTE_RESCUE_BFV,
]
])
@@ -914,8 +955,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
def _move_and_check_allocations(self, server, request, old_flavor,
new_flavor, source_rp_uuid, dest_rp_uuid):
- self.api.post_server_action(server['id'], request)
- self._wait_for_state_change(server, 'VERIFY_RESIZE')
+ self._migrate_or_resize(server, request)
def _check_allocation():
self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
@@ -971,13 +1011,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
# Resize the server to the same host and check usages in VERIFY_RESIZE
# state
self.flags(allow_resize_to_same_host=True)
- resize_req = {
- 'resize': {
- 'flavorRef': new_flavor['id']
- }
- }
- self.api.post_server_action(server['id'], resize_req)
- self._wait_for_state_change(server, 'VERIFY_RESIZE')
+ self._resize_server(server, new_flavor['id'])
self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)
@@ -1044,23 +1078,3 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
# Account for reserved_host_cpus.
expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])
-
- def _confirm_resize(self, server):
- self.api.post_server_action(server['id'], {'confirmResize': None})
- server = self._wait_for_state_change(server, 'ACTIVE')
- self._wait_for_instance_action_event(
- server, instance_actions.CONFIRM_RESIZE,
- 'compute_confirm_resize', 'success')
- return server
-
- def _revert_resize(self, server):
- self.api.post_server_action(server['id'], {'revertResize': None})
- server = self._wait_for_state_change(server, 'ACTIVE')
- self._wait_for_migration_status(server, ['reverted'])
- # Note that the migration status is changed to "reverted" in the
- # dest host revert_resize method but the allocations are cleaned up
- # in the source host finish_revert_resize method so we need to wait
- # for the finish_revert_resize method to complete.
- fake_notifier.wait_for_versioned_notifications(
- 'instance.resize_revert.end')
- return server
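
The relocated helpers give every InstanceHelperMixin user the same resize lifecycle. A sketch of how a functional test is expected to chain them; the server and flavor setup are assumed to exist already:

    # Inside a test method of an InstanceHelperMixin-based test case.
    self.flags(allow_resize_to_same_host=True)
    self._resize_server(server, new_flavor['id'])  # waits for VERIFY_RESIZE
    server = self._revert_resize(server)           # needs fake_notifier stubbed
    self._resize_server(server, new_flavor['id'])
    server = self._confirm_resize(server)          # waits for ACTIVE + event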
diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py
index dd4f81e0bc..9bd1c12b3b 100644
--- a/nova/tests/functional/libvirt/test_vgpu.py
+++ b/nova/tests/functional/libvirt/test_vgpu.py
@@ -14,6 +14,7 @@
import fixtures
import re
+import mock
import os_resource_classes as orc
from oslo_config import cfg
from oslo_log import log as logging
@@ -24,17 +25,25 @@ from nova import context
from nova import objects
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+_DEFAULT_HOST = 'host1'
+
+
class VGPUTestBase(base.ServersTestBase):
FAKE_LIBVIRT_VERSION = 5000000
FAKE_QEMU_VERSION = 3001000
+ # Since we run all computes in a single process, we need to track
+ # which compute service is in use at any given moment.
+ _current_host = _DEFAULT_HOST
+
def setUp(self):
super(VGPUTestBase, self).setUp()
self.useFixture(fixtures.MockPatch(
@@ -45,6 +54,25 @@ class VGPUTestBase(base.ServersTestBase):
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.create_mdev',
side_effect=self._create_mdev))
+
+ # NOTE(sbauza): Since the fake create_mdev doesn't know which compute
+ # was called, we need a value that is set just before the driver
+ # calls create_mdev. That's why we wrap the method below: it gives us
+ # the LibvirtDriver instance so we can update the self._current_host
+ # value.
+ orig_get_vgpu_type_per_pgpu = (
+ libvirt_driver.LibvirtDriver._get_vgpu_type_per_pgpu)
+
+ def fake_get_vgpu_type_per_pgpu(_self, *args):
+ # Here we look up the hostname from the virt driver...
+ self._current_host = _self._host.get_hostname()
+ # ... and then we call the original method
+ return orig_get_vgpu_type_per_pgpu(_self, *args)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_vgpu_type_per_pgpu',
+ new=fake_get_vgpu_type_per_pgpu))
+
self.context = context.get_admin_context()
def pci2libvirt_address(self, address):
@@ -61,14 +89,18 @@ class VGPUTestBase(base.ServersTestBase):
uuid = uuidutils.generate_uuid()
mdev_name = libvirt_utils.mdev_uuid2name(uuid)
libvirt_parent = self.pci2libvirt_address(physical_device)
- self.fake_connection.mdev_info.devices.update(
+ # Here we pick the right compute thanks to self._current_host, which
+ # was updated just before.
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
{mdev_name: fakelibvirt.FakeMdevDevice(dev_name=mdev_name,
type_id=mdev_type,
parent=libvirt_parent)})
return uuid
def _start_compute_service(self, hostname):
- self.fake_connection = self._get_connection(
+ fake_connection = self._get_connection(
host_info=fakelibvirt.HostInfo(cpu_nodes=2, kB_mem=8192),
# We want to create two pGPUs but no other PCI devices
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=0,
@@ -76,9 +108,11 @@ class VGPUTestBase(base.ServersTestBase):
num_vfs=0,
num_mdevcap=2),
hostname=hostname)
-
- self.mock_conn.return_value = self.fake_connection
- compute = self.start_service('compute', host=hostname)
+ with mock.patch('nova.virt.libvirt.host.Host.get_connection',
+ return_value=fake_connection):
+ # this method will update a self.computes dict keyed by hostname
+ compute = self._start_compute(hostname)
+ compute.driver._host.get_connection = lambda: fake_connection
rp_uuid = self._get_provider_uuid_by_name(hostname)
rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid)
for rp in rp_uuids:
@@ -94,6 +128,11 @@ class VGPUTestBase(base.ServersTestBase):
class VGPUTests(VGPUTestBase):
+ # We want to target specific hosts for the created instances
+ api_major_version = 'v2.1'
+ ADMIN_API = True
+ microversion = 'latest'
+
def setUp(self):
super(VGPUTests, self).setUp()
extra_spec = {"resources:VGPU": "1"}
@@ -103,23 +142,86 @@ class VGPUTests(VGPUTestBase):
self.flags(
enabled_vgpu_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
- self.compute1 = self._start_compute_service('host1')
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.compute1 = self._start_compute_service(_DEFAULT_HOST)
+
+ def assert_vgpu_usage_for_compute(self, compute, expected):
+ total_usage = 0
+ # We only want to get mdevs that are assigned to instances
+ mdevs = compute.driver._get_all_assigned_mediated_devices()
+ for mdev in mdevs:
+ mdev_name = libvirt_utils.mdev_uuid2name(mdev)
+ mdev_info = compute.driver._get_mediated_device_information(
+ mdev_name)
+ parent_name = mdev_info['parent']
+ parent_rp_name = compute.host + '_' + parent_name
+ parent_rp_uuid = self._get_provider_uuid_by_name(parent_rp_name)
+ parent_usage = self._get_provider_usages(parent_rp_uuid)
+ if orc.VGPU in parent_usage:
+ total_usage += parent_usage[orc.VGPU]
+ self.assertEqual(expected, len(mdevs))
+ self.assertEqual(expected, total_usage)
def test_create_servers_with_vgpu(self):
self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor, host=self.compute1.host,
- expected_state='ACTIVE')
- # Now we should find a new mdev
- mdevs = self.compute1.driver._get_mediated_devices()
- self.assertEqual(1, len(mdevs))
+ networks='auto', expected_state='ACTIVE')
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=1)
+
+ def _confirm_resize(self, server, host='host1'):
+ # NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
+ # host option to find the source hostname, but since we have a global
+ # CONF the value will be the hostname of the last compute service that
+ # was created, so we need to change it here.
+ # TODO(sbauza): Remove the below once we stop using CONF.host in
+ # libvirt and instead look at the compute host value.
+ orig_host = CONF.host
+ self.flags(host=host)
+ super(VGPUTests, self)._confirm_resize(server)
+ self.flags(host=orig_host)
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ def test_resize_servers_with_vgpu(self):
+ # Add another compute for the sake of resizing
+ self.compute2 = self._start_compute_service('host2')
+ server = self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+ # Make sure we only have 1 vGPU for compute1
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=1)
+ self.assert_vgpu_usage_for_compute(self.compute2, expected=0)
- # Checking also the allocations for the parent pGPU
- parent_name = mdevs[0]['parent']
- parent_rp_name = self.compute1.host + '_' + parent_name
- parent_rp_uuid = self._get_provider_uuid_by_name(parent_rp_name)
- usage = self._get_provider_usages(parent_rp_uuid)
- self.assertEqual(1, usage[orc.VGPU])
+ extra_spec = {"resources:VGPU": "1"}
+ new_flavor = self._create_flavor(memory_mb=4096,
+ extra_spec=extra_spec)
+ # First, resize and then revert.
+ self._resize_server(server, new_flavor)
+ # After resizing, we have two vGPUs, one on each compute
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=1)
+ self.assert_vgpu_usage_for_compute(self.compute2, expected=1)
+
+ self._revert_resize(server)
+ # We're back to the original resources usage
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=1)
+ self.assert_vgpu_usage_for_compute(self.compute2, expected=0)
+
+ # Now resize and then confirm it.
+ self._resize_server(server, new_flavor)
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=1)
+ self.assert_vgpu_usage_for_compute(self.compute2, expected=1)
+
+ self._confirm_resize(server)
+ # After confirming, the source guest is gone so we only have 1 vGPU
+ self.assert_vgpu_usage_for_compute(self.compute1, expected=0)
+ self.assert_vgpu_usage_for_compute(self.compute2, expected=1)
class VGPUMultipleTypesTests(VGPUTestBase):
@@ -180,7 +282,9 @@ class VGPUMultipleTypesTests(VGPUTestBase):
def test_create_servers_with_specific_type(self):
# Regenerate the PCI addresses so both pGPUs now support nvidia-12
- self.fake_connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+ connection = self.computes[
+ self.compute1.host].driver._host.get_connection()
+ connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
multiple_gpu_types=True)
# Make a restart to update the Resource Providers
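
The fixture trick in this file (capture the original method, wrap it, patch the wrapper in) is reusable whenever a fake needs to observe per-instance state. A generic sketch with invented names, mirroring the _get_vgpu_type_per_pgpu wrapping:

    import fixtures

    # Hypothetical target class and method.
    orig_method = SomeDriver.some_method

    def wrapped(_self, *args, **kwargs):
        observe(_self)  # hypothetical hook: grab state off the instance
        return orig_method(_self, *args, **kwargs)

    self.useFixture(fixtures.MockPatch(
        'path.to.SomeDriver.some_method', new=wrapped))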
diff --git a/nova/tests/functional/test_flavor_extraspecs.py b/nova/tests/functional/test_flavor_extraspecs.py
index cb8afe9a9d..d158067e6d 100644
--- a/nova/tests/functional/test_flavor_extraspecs.py
+++ b/nova/tests/functional/test_flavor_extraspecs.py
@@ -14,8 +14,6 @@
"""Tests for os-extra_specs API."""
-import testtools
-
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
@@ -43,7 +41,7 @@ class FlavorExtraSpecsTest(integrated_helpers._IntegratedTestBase):
This should pass because validation is not enabled in this API
microversion.
"""
- body = {'extra_specs': {'hw:numa_nodes': '1', 'foo': 'bar'}}
+ body = {'extra_specs': {'hw:numa_nodes': 'foo', 'foo': 'bar'}}
self.admin_api.post_extra_spec(self.flavor_id, body)
self.assertEqual(
body['extra_specs'], self.admin_api.get_extra_specs(self.flavor_id)
@@ -64,8 +62,8 @@ class FlavorExtraSpecsTest(integrated_helpers._IntegratedTestBase):
This should pass because validation is not enabled in this API
microversion.
"""
- spec_id = 'foo:bar'
- body = {'foo:bar': 'baz'}
+ spec_id = 'hw:foo'
+ body = {'hw:foo': 'bar'}
self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
self.assertEqual(
body, self.admin_api.get_extra_spec(self.flavor_id, spec_id)
@@ -82,11 +80,12 @@ class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest):
# this should fail because 'foo' is not a suitable value for
# 'hw:numa_nodes'
- with testtools.ExpectedException(
- api_client.OpenStackApiException
- ) as exc:
- self.admin_api.post_extra_spec(self.flavor_id, body)
- self.assertEqual(400, exc.response.status_code)
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.admin_api.post_extra_spec,
+ self.flavor_id, body,
+ )
+ self.assertEqual(400, exc.response.status_code)
# ...and the extra specs should not be saved
self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id))
@@ -102,11 +101,12 @@ class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest):
body = {'extra_specs': {'hw:numa_nodes': '1', 'hw:foo': 'bar'}}
# ...but this should fail because we do recognize the namespace
- with testtools.ExpectedException(
- api_client.OpenStackApiException
- ) as exc:
- self.admin_api.post_extra_spec(self.flavor_id, body)
- self.assertEqual(400, exc.response.status_code)
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.admin_api.post_extra_spec,
+ self.flavor_id, body,
+ )
+ self.assertEqual(400, exc.response.status_code)
def test_update_invalid_spec(self):
"""Test updating extra specs with invalid specs."""
@@ -114,21 +114,23 @@ class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest):
body = {'hw:foo': 'bar'}
# this should fail because we don't recognize the extra spec
- with testtools.ExpectedException(
- api_client.OpenStackApiException
- ) as exc:
- self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
- self.assertEqual(400, exc.response.status_code)
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.admin_api.put_extra_spec,
+ self.flavor_id, spec_id, body,
+ )
+ self.assertEqual(400, exc.response.status_code)
spec_id = 'hw:numa_nodes'
body = {'hw:numa_nodes': 'foo'}
# ...while this should fail because the value is not valid
- with testtools.ExpectedException(
- api_client.OpenStackApiException
- ) as exc:
- self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
- self.assertEqual(400, exc.response.status_code)
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.admin_api.put_extra_spec,
+ self.flavor_id, spec_id, body,
+ )
+ self.assertEqual(400, exc.response.status_code)
# ...and neither extra spec should be saved
self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id))
@@ -141,3 +143,4 @@ class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest):
# this should pass because we don't recognize the extra spec but it's
# not in a namespace we care about
self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
+ self.assertEqual(body, self.admin_api.get_extra_specs(self.flavor_id))
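
The switch away from testtools.ExpectedException fixes a silent-pass pitfall: assertions written inside the with block never run once the exception is raised. A tiny sketch of the difference:

    import testtools

    class Demo(testtools.TestCase):
        def test_pitfall(self):
            with testtools.ExpectedException(ValueError):
                int('nope')
                self.fail('never reached: the raise exits the block')

            # assertRaises returns the exception, so checks really run.
            exc = self.assertRaises(ValueError, int, 'nope')
            self.assertIn('nope', str(exc))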
diff --git a/nova/tests/functional/test_policy.py b/nova/tests/functional/test_policy.py
index e70bca2f71..6f08c64328 100644
--- a/nova/tests/functional/test_policy.py
+++ b/nova/tests/functional/test_policy.py
@@ -11,10 +11,10 @@
# under the License.
import datetime
-import functools
from oslo_utils import timeutils
+import nova.policies.base
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -58,7 +58,7 @@ class HostStatusPolicyTestCase(test.TestCase,
# all users are allowed to see UNKNOWN host status only.
self.policy.set_rules({
self.host_status_rule: 'rule:admin_api',
- self.host_status_unknown_only_rule: '@'},
+ self.host_status_unknown_only_rule: nova.policies.base.RULE_ANY},
# This is needed to avoid nulling out the rest of default policy.
overwrite=False)
# Create a server as a normal non-admin user.
@@ -85,16 +85,18 @@ class HostStatusPolicyTestCase(test.TestCase,
reset_state = {'os-resetState': {'state': 'active'}}
self.admin_api.post_server_action(server['id'], reset_state)
- def _test_host_status_unknown_only(self, admin_func, func):
- # Get server as admin.
- server = self._get_server(admin_func())
+ def _test_host_status_unknown_only(self, func_name, *args):
+ admin_func = getattr(self.admin_api, func_name)
+ func = getattr(self.api, func_name)
+ # Run the operation as admin and extract the server from the response.
+ server = self._get_server(admin_func(*args))
# We need to wait for ACTIVE if this was a post rebuild server action,
# else a subsequent rebuild request will fail with a 409 in the API.
self._wait_for_state_change(server, 'ACTIVE')
# Verify admin can see the host status UP.
self.assertEqual('UP', server['host_status'])
# Get server as normal non-admin user.
- server = self._get_server(func())
+ server = self._get_server(func(*args))
self._wait_for_state_change(server, 'ACTIVE')
# Verify non-admins do not receive the host_status field because it
# is not UNKNOWN.
@@ -105,49 +107,47 @@ class HostStatusPolicyTestCase(test.TestCase,
minutes_from_now = timeutils.utcnow() + datetime.timedelta(minutes=30)
timeutils.set_time_override(override_time=minutes_from_now)
self.addCleanup(timeutils.clear_time_override)
- # Get server as admin.
- server = self._get_server(admin_func())
+ # Run the operation as admin and extract the server from the response.
+ server = self._get_server(admin_func(*args))
+ # Verify admin can see the host status UNKNOWN.
+ self.assertEqual('UNKNOWN', server['host_status'])
# Now that the compute service is down, the rebuild will not ever
# complete. But we're only interested in what would be returned from
# the API post rebuild action, so reset the state to ACTIVE to allow
# the next rebuild request to go through without a 409 error.
self._set_server_state_active(server)
- # Verify admin can see the host status UNKNOWN.
- self.assertEqual('UNKNOWN', server['host_status'])
- # Get server as normal non-admin user.
- server = self._get_server(func())
- self._set_server_state_active(server)
+ # Run the operation as a normal non-admin user and extract the server
+ # from the response.
+ server = self._get_server(func(*args))
# Verify non-admin can see the host status UNKNOWN too.
self.assertEqual('UNKNOWN', server['host_status'])
+ self._set_server_state_active(server)
# Now, adjust the policy to make it so only admin are allowed to see
# UNKNOWN host status only.
self.policy.set_rules({
self.host_status_unknown_only_rule: 'rule:admin_api'},
overwrite=False)
- # Get server as normal non-admin user.
- server = self._get_server(func())
- self._set_server_state_active(server)
+ # Run the operation as a normal non-admin user and extract the server
+ # from the response.
+ server = self._get_server(func(*args))
+ # Verify non-admins do not receive the host_status field.
self.assertNotIn('host_status', server)
+ self._set_server_state_active(server)
# Verify that admin will not receive the host_status field if the
# API microversion < 2.16.
with utils.temporary_mutation(self.admin_api, microversion='2.15'):
- server = self._get_server(admin_func())
+ server = self._get_server(admin_func(*args))
self.assertNotIn('host_status', server)
def test_get_server_host_status_unknown_only(self):
server = self._setup_host_status_unknown_only_test()
# GET /servers/{server_id}
- admin_func = functools.partial(self.admin_api.get_server, server['id'])
- func = functools.partial(self.api.get_server, server['id'])
- self._test_host_status_unknown_only(admin_func, func)
+ self._test_host_status_unknown_only('get_server', server['id'])
def test_get_servers_detail_host_status_unknown_only(self):
self._setup_host_status_unknown_only_test()
# GET /servers/detail
- admin_func = functools.partial(self.admin_api.get_servers)
- func = functools.partial(self.api.get_servers)
- self._test_host_status_unknown_only(admin_func, func)
+ self._test_host_status_unknown_only('get_servers')
def test_put_server_host_status_unknown_only(self):
# The host_status field is returned from PUT /servers/{server_id}
@@ -156,11 +156,9 @@ class HostStatusPolicyTestCase(test.TestCase,
self.admin_api.microversion = '2.75'
server = self._setup_host_status_unknown_only_test(networks='none')
# PUT /servers/{server_id}
- an_update = {'server': {'name': 'host-status-unknown-only'}}
- admin_func = functools.partial(self.admin_api.put_server, server['id'],
- an_update)
- func = functools.partial(self.api.put_server, server['id'], an_update)
- self._test_host_status_unknown_only(admin_func, func)
+ update = {'server': {'name': 'host-status-unknown-only'}}
+ self._test_host_status_unknown_only('put_server', server['id'],
+ update)
def test_post_server_rebuild_host_status_unknown_only(self):
# The host_status field is returned from POST
@@ -170,8 +168,5 @@ class HostStatusPolicyTestCase(test.TestCase,
server = self._setup_host_status_unknown_only_test(networks='none')
# POST /servers/{server_id}/action (rebuild)
rebuild = {'rebuild': {'imageRef': self.image_uuid}}
- admin_func = functools.partial(self.admin_api.post_server_action,
- server['id'], rebuild)
- func = functools.partial(self.api.post_server_action, server['id'],
- rebuild)
- self._test_host_status_unknown_only(admin_func, func)
+ self._test_host_status_unknown_only('post_server_action', server['id'],
+ rebuild)
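
Replacing the functools.partial pairs with a method name plus arguments keeps the admin and non-admin calls symmetric. The shape of the refactor, reduced to its core; the helper name here is invented:

    # Sketch: resolve the same client method on both API handles.
    def _run_as_both(self, func_name, *args):
        admin_result = getattr(self.admin_api, func_name)(*args)
        user_result = getattr(self.api, func_name)(*args)
        return admin_result, user_result

    # e.g. self._run_as_both('get_server', server['id'])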
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
new file mode 100644
index 0000000000..fa96c10344
--- /dev/null
+++ b/nova/tests/functional/test_server_rescue.py
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+
+
+class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
+ """Base class for various boot from volume rescue tests."""
+
+ def setUp(self):
+ super(BFVRescue, self).setUp()
+ self.useFixture(nova_fixtures.CinderFixture(self))
+ self._start_compute(host='host1')
+
+ def _create_bfv_server(self):
+ server_request = self._build_server(networks=[])
+ server_request.pop('imageRef')
+ server_request['block_device_mapping_v2'] = [{
+ 'boot_index': 0,
+ 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'}]
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server
+
+
+class DisallowBFVRescuev286(BFVRescue):
+ """Asserts that BFV rescue requests fail prior to microversion 2.87.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.86'
+
+ def test_bfv_rescue_not_supported(self):
+ server = self._create_bfv_server()
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
+ 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class DisallowBFVRescuev286WithTrait(BFVRescue):
+ """Asserts that BFV rescue requests fail prior to microversion 2.87 even
+ when the required COMPUTE_RESCUE_BFV trait is reported by the compute.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.86'
+
+ def test_bfv_rescue_not_supported(self):
+ server = self._create_bfv_server()
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
+ 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class DisallowBFVRescuev287WithoutTrait(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required COMPUTE_RESCUE_BFV trait is not reported by the compute.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_not_supported(self):
+ server = self._create_bfv_server()
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
+ 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Host unable to rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithTrait(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required COMPUTE_RESCUE_BFV trait is reported by the compute.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_supported(self):
+ server = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self._wait_for_state_change(server, 'RESCUE')
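
The two fake drivers differ only in whether they advertise the BFV-rescue capability, which nova surfaces as the COMPUTE_RESCUE_BFV trait on the compute node provider. Presumably the fake is wired roughly like this; a sketch, not the actual fake.py source:

    # Hypothetical reconstruction of the fake driver used above.
    class RescueBFVDriver(MediumFakeDriver):
        capabilities = dict(MediumFakeDriver.capabilities,
                            supports_bfv_rescue=True)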
diff --git a/nova/tests/unit/api/openstack/compute/test_aggregates.py b/nova/tests/unit/api/openstack/compute/test_aggregates.py
index 34e34ba85b..fb096861eb 100644
--- a/nova/tests/unit/api/openstack/compute/test_aggregates.py
+++ b/nova/tests/unit/api/openstack/compute/test_aggregates.py
@@ -116,11 +116,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
self._assert_agg_data(AGGREGATE_LIST, _make_agg_list(result))
self.assertTrue(mock_list.called)
- def test_index_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index,
- self.user_req)
-
def test_create(self):
with mock.patch.object(self.controller.api, 'create_aggregate',
return_value=AGGREGATE) as mock_create:
@@ -131,12 +126,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
self._assert_agg_data(FORMATTED_AGGREGATE, _make_agg_obj(result))
mock_create.assert_called_once_with(self.context, 'test', 'nova1')
- def test_create_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create, self.user_req,
- body={"aggregate": {"name": "test",
- "availability_zone": "nova1"}})
-
def test_create_with_duplicate_aggregate_name(self):
side_effect = exception.AggregateNameExists(aggregate_name="test")
with mock.patch.object(self.controller.api, 'create_aggregate',
@@ -294,11 +283,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
self._assert_agg_data(AGGREGATE, _make_agg_obj(aggregate))
mock_get.assert_called_once_with(self.context, '1')
- def test_show_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show,
- self.user_req, "1")
-
def test_show_with_bad_aggregate(self):
side_effect = exception.AggregateNotFound(aggregate_id='2')
with mock.patch.object(self.controller.api, 'get_aggregate',
@@ -323,12 +307,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
mock_update.assert_called_once_with(self.context, '1',
body["aggregate"])
- def test_update_no_admin(self):
- body = {"aggregate": {"availability_zone": "nova"}}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.update,
- self.user_req, "1", body=body)
-
def test_update_with_only_name(self):
body = {"aggregate": {"name": "new_name"}}
with mock.patch.object(self.controller.api, 'update_aggregate',
@@ -459,12 +437,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
self._assert_agg_data(AGGREGATE, _make_agg_obj(aggregate))
mock_add.assert_called_once_with(self.context, "1", "host1")
- def test_add_host_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.add_host),
- self.user_req, "1",
- body={"add_host": {"host": "host1"}})
-
def test_add_host_with_already_added_host(self):
side_effect = exception.AggregateHostExists(aggregate_id="1",
host="host1")
@@ -541,12 +513,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
body={"remove_host": {"host": "host1"}})
mock_rem.assert_called_once_with(self.context, "1", "host1")
- def test_remove_host_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.remove_host),
- self.user_req, "1",
- body={"remove_host": {"host": "host1"}})
-
def test_remove_host_with_bad_aggregate(self):
side_effect = exception.AggregateNotFound(
aggregate_id="2")
@@ -650,13 +616,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
mocked.assert_called_once_with(self.context, "1",
body["set_metadata"]["metadata"])
- def test_set_metadata_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.set_metadata),
- self.user_req, "1",
- body={"set_metadata": {"metadata":
- {"foo": "bar"}}})
-
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
side_effect = exception.AggregateNotFound(aggregate_id="2")
@@ -715,11 +674,6 @@ class AggregateTestCaseV21(test.NoDBTestCase):
self.controller.delete(self.req, "1")
mock_del.assert_called_once_with(self.context, "1")
- def test_delete_aggregate_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.delete,
- self.user_req, "1")
-
def test_delete_aggregate_with_bad_aggregate(self):
side_effect = exception.AggregateNotFound(
aggregate_id="2")
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index c94cf5a811..ac1e2f81ca 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -192,13 +192,6 @@ class EvacuateTestV21(test.NoDBTestCase):
self._get_evacuate_response({'host': 'my-host',
'onSharedStorage': 'True'})
- def test_not_admin(self):
- body = {'evacuate': {'host': 'my-host',
- 'onSharedStorage': 'False'}}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._evacuate,
- self.req, self.UUID, body=body)
-
def test_evacuate_to_same_host(self):
self._check_evacuate_failure(webob.exc.HTTPBadRequest,
{'host': 'host1',
@@ -316,12 +309,6 @@ class EvacuateTestV214(EvacuateTestV21):
self.assertEqual(admin_pass,
mock_evacuate.call_args_list[0][0][4])
- def test_not_admin(self):
- body = {'evacuate': {'host': 'my-host'}}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._evacuate,
- self.req, self.UUID, body=body)
-
@testtools.skip('onSharedStorage was removed from Microversion 2.14')
@mock.patch('nova.objects.Instance.save')
def test_evacuate_shared_and_pass(self, mock_save):
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
index d973e1d080..f8a7bfbcb6 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
@@ -153,14 +153,6 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
with mock.patch('nova.objects.Flavor.save'):
self.controller.delete(req, 1, 'hw:numa_nodes')
- def test_delete_no_admin(self):
- self.stub_out('nova.objects.flavor._flavor_extra_specs_del',
- delete_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/hw:numa_nodes')
- self.assertRaises(exception.Forbidden, self.controller.delete,
- req, 1, 'hw numa nodes')
-
def test_delete_spec_not_found(self):
req = self._get_request('1/os-extra_specs/key6',
use_admin_context=True)
@@ -181,13 +173,6 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('shared', res_dict['extra_specs']['hw:cpu_policy'])
self.assertEqual('1', res_dict['extra_specs']['hw:numa_nodes'])
- def test_create_no_admin(self):
- body = {'extra_specs': {'hw:numa_nodes': '1'}}
-
- req = self._get_request('1/os-extra_specs')
- self.assertRaises(exception.Forbidden, self.controller.create,
- req, 1, body=body)
-
def test_create_flavor_not_found(self):
body = {'extra_specs': {'hw:numa_nodes': '1'}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
@@ -272,6 +257,17 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
'hw:cpu_policy': 'sharrred',
'hw:cpu_policyyyyyyy': 'shared',
'hw:foo': 'bar',
+ 'resources:VCPU': 'N',
+ 'resources_foo:VCPU': 'N',
+ 'resources:VVCPU': '1',
+ 'resources_foo:VVCPU': '1',
+ 'trait:STORAGE_DISK_SSD': 'forbiden',
+ 'trait_foo:HW_CPU_X86_AVX2': 'foo',
+ 'trait:bar': 'required',
+ 'trait_foo:bar': 'required',
+ 'trait:CUSTOM_foo': 'required',
+ 'trait:CUSTOM_FOO': 'bar',
+ 'trait_foo:CUSTOM_BAR': 'foo',
}
for key, value in invalid_specs.items():
body = {'extra_specs': {key: value}}
@@ -303,12 +299,22 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
'hide_hypervisor_id': 'true',
'hw:numa_nodes': '1',
'hw:numa_cpus.0': '0-3,8-9,11,10',
+ 'resources:VCPU': '4',
+ 'resources_foo:VCPU': '4',
+ 'resources:CUSTOM_FOO': '1',
+ 'resources_foo:CUSTOM_BAR': '2',
+ 'trait:STORAGE_DISK_SSD': 'forbidden',
+ 'trait_foo:HW_CPU_X86_AVX2': 'required',
+ 'trait:CUSTOM_FOO': 'forbidden',
+ 'trait_foo:CUSTOM_BAR': 'required',
}
mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
for key, value in valid_specs.items():
body = {"extra_specs": {key: value}}
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ req = self._get_request(
+ '1/os-extra_specs', use_admin_context=True, version='2.86',
+ )
res_dict = self.controller.create(req, 1, body=body)
self.assertEqual(value, res_dict['extra_specs'][key])
@@ -323,13 +329,6 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('shared', res_dict['hw:cpu_policy'])
- def test_update_item_no_admin(self):
- body = {'hw:cpu_policy': 'shared'}
-
- req = self._get_request('1/os-extra_specs/hw:cpu_policy')
- self.assertRaises(exception.Forbidden, self.controller.update,
- req, 1, 'key1', body=body)
-
def _test_update_item_bad_request(self, body):
req = self._get_request('1/os-extra_specs/hw:cpu_policy',
use_admin_context=True)
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 674cb18ec2..5cf5471308 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -14,16 +14,13 @@
# under the License.
import mock
-from oslo_policy import policy as oslo_policy
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
from nova.api.openstack import wsgi as os_wsgi
-from nova.compute import api as compute_api
from nova import context as nova_context
from nova import exception
from nova import objects
-from nova import policy
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -319,93 +316,6 @@ class KeypairsTestV21(test.TestCase):
self.assertNotIn('type', res_dict['keypair'])
-class KeypairPolicyTestV21(test.NoDBTestCase):
- KeyPairController = keypairs_v21.KeypairController()
- policy_path = 'os_compute_api:os-keypairs'
-
- def setUp(self):
- super(KeypairPolicyTestV21, self).setUp()
-
- @staticmethod
- def _db_key_pair_get(context, user_id, name=None):
- if name is not None:
- return dict(test_keypair.fake_keypair,
- name='foo', public_key='XXX', fingerprint='YYY',
- type='ssh')
- else:
- return db_key_pair_get_all_by_user(context, user_id)
-
- self.stub_out("nova.objects.keypair.KeyPair._get_from_db",
- _db_key_pair_get)
-
- self.req = fakes.HTTPRequest.blank('')
-
- def test_keypair_list_fail_policy(self):
- rules = {self.policy_path + ':index': 'role:admin'}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.index,
- self.req)
-
- @mock.patch('nova.objects.KeyPairList.get_by_user')
- def test_keypair_list_pass_policy(self, mock_get):
- rules = {self.policy_path + ':index': ''}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- res = self.KeyPairController.index(self.req)
- self.assertIn('keypairs', res)
-
- def test_keypair_show_fail_policy(self):
- rules = {self.policy_path + ':show': 'role:admin'}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.show,
- self.req, 'FAKE')
-
- def test_keypair_show_pass_policy(self):
- rules = {self.policy_path + ':show': ''}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- res = self.KeyPairController.show(self.req, 'FAKE')
- self.assertIn('keypair', res)
-
- def test_keypair_create_fail_policy(self):
- body = {'keypair': {'name': 'create_test'}}
- rules = {self.policy_path + ':create': 'role:admin'}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.create,
- self.req, body=body)
-
- def _assert_keypair_create(self, mock_create, req):
- mock_create.assert_called_with(req, 'fake_user', 'create_test', 'ssh')
-
- @mock.patch.object(compute_api.KeypairAPI, 'create_key_pair')
- def test_keypair_create_pass_policy(self, mock_create):
- keypair_obj = objects.KeyPair(name='', public_key='',
- fingerprint='', user_id='')
-
- mock_create.return_value = (keypair_obj, 'dummy')
- body = {'keypair': {'name': 'create_test'}}
- rules = {self.policy_path + ':create': ''}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- res = self.KeyPairController.create(self.req, body=body)
- self.assertIn('keypair', res)
- req = self.req.environ['nova.context']
- self._assert_keypair_create(mock_create, req)
-
- def test_keypair_delete_fail_policy(self):
- rules = {self.policy_path + ':delete': 'role:admin'}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.delete,
- self.req, 'FAKE')
-
- @mock.patch('nova.objects.KeyPair.destroy_by_name')
- def test_keypair_delete_pass_policy(self, mock_destroy):
- rules = {self.policy_path + ':delete': ''}
- policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.KeyPairController.delete(self.req, 'FAKE')
-
-
class KeypairsTestV22(KeypairsTestV21):
wsgi_api_version = '2.2'
@@ -448,14 +358,6 @@ class KeypairsTestV210(KeypairsTestV22):
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
- def test_keypair_list_other_user_not_admin(self):
- req = fakes.HTTPRequest.blank(self.base_url +
- '/os-keypairs?user_id=foo',
- version=self.wsgi_api_version)
- with mock.patch.object(self.controller.api, 'get_key_pairs'):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, req)
-
def test_keypair_show_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
@@ -466,14 +368,6 @@ class KeypairsTestV210(KeypairsTestV22):
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
- def test_keypair_show_other_user_not_admin(self):
- req = fakes.HTTPRequest.blank(self.base_url +
- '/os-keypairs/FAKE?user_id=foo',
- version=self.wsgi_api_version)
- with mock.patch.object(self.controller.api, 'get_key_pair'):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show, req, 'FAKE')
-
def test_keypair_delete_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
@@ -485,14 +379,6 @@ class KeypairsTestV210(KeypairsTestV22):
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
- def test_keypair_delete_other_user_not_admin(self):
- req = fakes.HTTPRequest.blank(self.base_url +
- '/os-keypairs/FAKE?user_id=foo',
- version=self.wsgi_api_version)
- with mock.patch.object(self.controller.api, 'delete_key_pair'):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.delete, req, 'FAKE')
-
def test_keypair_create_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
@@ -523,16 +409,6 @@ class KeypairsTestV210(KeypairsTestV22):
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
- def test_keypair_create_other_user_not_admin(self):
- req = fakes.HTTPRequest.blank(self.base_url +
- '/os-keypairs',
- version=self.wsgi_api_version)
- body = {'keypair': {'name': 'create_test',
- 'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create,
- req, body=body)
-
def test_keypair_list_other_user_invalid_in_old_microversion(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
diff --git a/nova/tests/unit/api/openstack/compute/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
index ab8a278196..bdb33a7e1a 100644
--- a/nova/tests/unit/api/openstack/compute/test_quota_classes.py
+++ b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
@@ -156,32 +156,3 @@ class QuotaClassSetsTestV257(QuotaClassSetsTestV250):
for resource in quota_classes_v21.FILTERED_QUOTAS_2_57:
self.quota_resources.pop(resource, None)
self.filtered_quotas.extend(quota_classes_v21.FILTERED_QUOTAS_2_57)
-
-
-class QuotaClassesPolicyEnforcementV21(test.NoDBTestCase):
-
- def setUp(self):
- super(QuotaClassesPolicyEnforcementV21, self).setUp()
- self.controller = quota_classes_v21.QuotaClassSetsController()
- self.req = fakes.HTTPRequest.blank('')
-
- def test_show_policy_failed(self):
- rule_name = "os_compute_api:os-quota-class-sets:show"
- self.policy.set_rules({rule_name: "quota_class:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.show, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- def test_update_policy_failed(self):
- rule_name = "os_compute_api:os-quota-class-sets:update"
- self.policy.set_rules({rule_name: "quota_class:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.update, self.req, fakes.FAKE_UUID,
- body={'quota_class_set': {}})
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 8e2d216882..a002408a0e 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -502,65 +502,6 @@ class UserQuotasTestV21(BaseQuotaSetsTest):
len(mock_createlimit.mock_calls))
-class QuotaSetsPolicyEnforcementV21(test.NoDBTestCase):
-
- def setUp(self):
- super(QuotaSetsPolicyEnforcementV21, self).setUp()
- self.controller = quotas_v21.QuotaSetsController()
- self.req = fakes.HTTPRequest.blank('')
-
- def test_delete_policy_failed(self):
- rule_name = "os_compute_api:os-quota-sets:delete"
- self.policy.set_rules({rule_name: "project_id:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.delete, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- def test_defaults_policy_failed(self):
- rule_name = "os_compute_api:os-quota-sets:defaults"
- self.policy.set_rules({rule_name: "project_id:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.defaults, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- def test_show_policy_failed(self):
- rule_name = "os_compute_api:os-quota-sets:show"
- self.policy.set_rules({rule_name: "project_id:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.show, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- def test_detail_policy_failed(self):
- rule_name = "os_compute_api:os-quota-sets:detail"
- self.policy.set_rules({rule_name: "project_id:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.detail, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- def test_update_policy_failed(self):
- rule_name = "os_compute_api:os-quota-sets:update"
- self.policy.set_rules({rule_name: "project_id:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.update, self.req, fakes.FAKE_UUID,
- body={'quota_set': {}})
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
-
class QuotaSetsTestV236(test.NoDBTestCase):
microversion = '2.36'
diff --git a/nova/tests/unit/api/openstack/compute/test_rescue.py b/nova/tests/unit/api/openstack/compute/test_rescue.py
index 7f229ea356..fd2cdfa13b 100644
--- a/nova/tests/unit/api/openstack/compute/test_rescue.py
+++ b/nova/tests/unit/api/openstack/compute/test_rescue.py
@@ -15,6 +15,9 @@
import mock
import webob
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack import api_version_request
from nova.api.openstack.compute import rescue as rescue_v21
from nova import compute
import nova.conf
@@ -28,7 +31,7 @@ UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
def rescue(self, context, instance, rescue_password=None,
- rescue_image_ref=None):
+ rescue_image_ref=None, allow_bfv_rescue=False):
pass
@@ -57,6 +60,9 @@ class RescueTestV21(test.NoDBTestCase):
def _set_up_controller(self):
return rescue_v21.RescueController()
+ def _allow_bfv_rescue(self):
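+        # api_version_request.is_supported() returns True when the
+        # request microversion is 2.87 or later, i.e. when the controller
+        # is expected to pass allow_bfv_rescue=True to the compute API.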
+ return api_version_request.is_supported(self.fake_req, '2.87')
+
@mock.patch.object(compute.api.API, "rescue")
def test_rescue_from_locked_server(self, mock_rescue):
mock_rescue.side_effect = exception.InstanceIsLocked(
@@ -173,7 +179,8 @@ class RescueTestV21(test.NoDBTestCase):
mock.ANY,
instance,
rescue_password=u'ABC123',
- rescue_image_ref=self.image_uuid)
+ rescue_image_ref=self.image_uuid,
+ allow_bfv_rescue=self._allow_bfv_rescue())
@mock.patch('nova.compute.api.API.rescue')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -187,9 +194,9 @@ class RescueTestV21(test.NoDBTestCase):
resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
self.assertEqual("ABC123", resp_json['adminPass'])
- mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
- rescue_password=u'ABC123',
- rescue_image_ref=None)
+ mock_compute_api_rescue.assert_called_with(
+ mock.ANY, instance, rescue_password=u'ABC123',
+ rescue_image_ref=None, allow_bfv_rescue=self._allow_bfv_rescue())
def test_rescue_with_none(self):
body = dict(rescue=None)
@@ -212,3 +219,28 @@ class RescueTestV21(test.NoDBTestCase):
self.assertRaises(exception.ValidationError,
self.controller._rescue,
self.fake_req, UUID, body=body)
+
+
+class RescueTestV287(RescueTestV21):
+
+ def setUp(self):
+ super(RescueTestV287, self).setUp()
+ v287_req = api_version_request.APIVersionRequest('2.87')
+ self.fake_req.api_version_request = v287_req
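+        # Pinning the request to 2.87 means the tests inherited from
+        # RescueTestV21 now resolve _allow_bfv_rescue() to True as well.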
+
+ @mock.patch('nova.compute.api.API.rescue')
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_allow_bfv_rescue(self, mock_get_instance, mock_compute_rescue):
+ instance = fake_instance.fake_instance_obj(
+ self.fake_req.environ['nova.context'])
+ mock_get_instance.return_value = instance
+
+ body = {"rescue": {"adminPass": "ABC123"}}
+ self.controller._rescue(self.fake_req, uuids.instance, body=body)
+
+ # Assert that allow_bfv_rescue is True for this 2.87 request
+ mock_get_instance.assert_called_once_with(
+ mock.ANY, mock.ANY, uuids.instance)
+ mock_compute_rescue.assert_called_with(
+ mock.ANY, instance, rescue_image_ref=None,
+ rescue_password=u'ABC123', allow_bfv_rescue=True)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
index c1d598a396..591ab493a4 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
@@ -44,6 +44,7 @@ class ResetStateTestsV21(test.NoDBTestCase):
instance.uuid = self.uuid
instance.vm_state = 'fake'
instance.task_state = 'fake'
+ instance.project_id = self.context.project_id
instance.obj_reset_changes()
return instance
diff --git a/nova/tests/unit/api/openstack/compute/test_server_topology.py b/nova/tests/unit/api/openstack/compute/test_server_topology.py
index 34372f27dc..ab216f033b 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_topology.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_topology.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
@@ -23,7 +22,6 @@ from nova import objects
from nova.objects import instance_numa as numa
from nova import test
from nova.tests.unit.api.openstack import fakes
-from nova.tests.unit import fake_instance
class ServerTopologyTestV278(test.NoDBTestCase):
@@ -100,28 +98,3 @@ class ServerTopologyTestV278(test.NoDBTestCase):
req,
self.uuid)
self.assertEqual(400, excep.code)
-
-
-class ServerTopologyEnforcementV278(test.NoDBTestCase):
- api_version = '2.78'
-
- def setUp(self):
- super(ServerTopologyEnforcementV278, self).setUp()
- self.controller = server_topology.ServerTopologyController()
- self.req = fakes.HTTPRequest.blank('', version=self.api_version)
- context = self.req.environ['nova.context']
- self.mock_get = self.useFixture(
- fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
- self.instance = fake_instance.fake_instance_obj(
- context, id=1, project_id=context.project_id)
- self.mock_get.return_value = self.instance
-
- def test_get_topology_policy_failed(self):
- rule_name = "compute:server:topology:index"
- self.policy.set_rules({rule_name: "project:non_fake"})
- exc = self.assertRaises(
- exception.PolicyNotAuthorized,
- self.controller.index, self.req, fakes.FAKE_UUID)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
diff --git a/nova/tests/unit/api/openstack/compute/test_serversV21.py b/nova/tests/unit/api/openstack/compute/test_serversV21.py
index 4efc815d01..2e3cd70d42 100644
--- a/nova/tests/unit/api/openstack/compute/test_serversV21.py
+++ b/nova/tests/unit/api/openstack/compute/test_serversV21.py
@@ -276,6 +276,22 @@ class ControllerTest(test.TestCase):
class ServersControllerTest(ControllerTest):
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
+ def setUp(self):
+ super(ServersControllerTest, self).setUp()
+ self.request = fakes.HTTPRequest.blank(
+ self.path_with_id_v2 % FAKE_UUID,
+ use_admin_context=False,
+ version=self.wsgi_api_version)
+ return_server = fakes.fake_compute_get(
+ id=2, availability_zone='nova',
+ launched_at=None,
+ terminated_at=None,
+ task_state=None,
+ vm_state=vm_states.ACTIVE,
+ power_state=1,
+ project_id=self.request.environ['nova.context'].project_id)
+ self.mock_get.side_effect = return_server
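+        # The stub server is owned by the request's own project,
+        # presumably so the project-scoped policy checks introduced by
+        # this series keep passing for the inherited show/detail tests.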
+
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
@@ -348,8 +364,7 @@ class ServersControllerTest(ControllerTest):
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_get_server_by_uuid(self):
- req = self.req(self.path_with_id % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
+ res_dict = self.controller.show(self.request, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_get_server_joins(self):
@@ -359,27 +374,31 @@ class ServersControllerTest(ControllerTest):
'numa_topology'], expected_attrs)
ctxt = context.RequestContext('fake', self.project_id)
return fake_instance.fake_instance_obj(
- ctxt, expected_attrs=expected_attrs)
-
+ ctxt, expected_attrs=expected_attrs,
+ project_id=self.request.environ['nova.context'].project_id)
self.mock_get.side_effect = fake_get
- req = self.req(self.path_with_id % FAKE_UUID)
- self.controller.show(req, FAKE_UUID)
+ self.controller.show(self.request, FAKE_UUID)
def test_unique_host_id(self):
"""Create two servers with the same host and different
        project_ids and check that the host_ids are unique.
"""
def return_instance_with_host(context, *args, **kwargs):
- project_id = uuidutils.generate_uuid()
+ project_id = context.project_id
return fakes.stub_instance_obj(context, id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
- req = self.req(self.path_with_id % FAKE_UUID)
+ req1 = self.req(self.path_with_id % FAKE_UUID)
+ project_id = uuidutils.generate_uuid()
+ req2 = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID,
+ version=self.wsgi_api_version,
+ project_id=project_id)
+
self.mock_get.side_effect = return_instance_with_host
- server1 = self.controller.show(req, FAKE_UUID)
- server2 = self.controller.show(req, FAKE_UUID)
+ server1 = self.controller.show(req1, FAKE_UUID)
+ server2 = self.controller.show(req2, FAKE_UUID)
self.assertNotEqual(server1['server']['hostId'],
server2['server']['hostId'])
@@ -390,9 +409,8 @@ class ServersControllerTest(ControllerTest):
"server": {
"id": uuid,
"user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
+ "updated": "2010-11-11T11:00:00Z",
"progress": progress,
"name": "server2",
"status": status,
@@ -456,7 +474,8 @@ class ServersControllerTest(ControllerTest):
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
- ]
+ ],
+ "tenant_id": self.request.environ['nova.context'].project_id
}
}
@@ -465,20 +484,23 @@ class ServersControllerTest(ControllerTest):
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
uuid = FAKE_UUID
- req = self.req(self.path_with_id_v2, uuid)
- res_dict = self.controller.show(req, uuid)
+ res_dict = self.controller.show(self.request, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
progress=0)
+ expected_server['server']['tenant_id'] = self.request.environ[
+ 'nova.context'].project_id
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_empty_az(self):
- self.mock_get.side_effect = fakes.fake_compute_get(
- availability_zone='')
uuid = FAKE_UUID
req = self.req(self.path_with_id_v2 % uuid)
+
+ self.mock_get.side_effect = fakes.fake_compute_get(
+ availability_zone='',
+ project_id=req.environ['nova.context'].project_id)
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['server']['OS-EXT-AZ:availability_zone'], '')
@@ -486,15 +508,17 @@ class ServersControllerTest(ControllerTest):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
- req = self.req(self.path_with_id % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
+ res_dict = self.controller.show(self.request, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
+ expected_server['server']['tenant_id'] = self.request.environ[
+ 'nova.context'].project_id
+
self.assertThat(res_dict, matchers.DictMatches(expected_server))
self.mock_get.assert_called_once_with(
- req.environ['nova.context'], FAKE_UUID,
+ self.request.environ['nova.context'], FAKE_UUID,
expected_attrs=['flavor', 'info_cache', 'metadata',
'numa_topology'], cell_down_support=False)
@@ -502,16 +526,17 @@ class ServersControllerTest(ControllerTest):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
- req = self.req(self.path_with_id % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
+ res_dict = self.controller.show(self.request, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
+ expected_server['server']['tenant_id'] = self.request.environ[
+ 'nova.context'].project_id
self.assertThat(res_dict, matchers.DictMatches(expected_server))
self.mock_get.assert_called_once_with(
- req.environ['nova.context'], FAKE_UUID,
+ self.request.environ['nova.context'], FAKE_UUID,
expected_attrs=['flavor', 'info_cache', 'metadata',
'numa_topology'], cell_down_support=False)
@@ -595,9 +620,10 @@ class ServersControllerTest(ControllerTest):
def test_show_server_hide_addresses_in_building(self):
uuid = FAKE_UUID
- self.mock_get.side_effect = fakes.fake_compute_get(
- uuid=uuid, vm_state=vm_states.BUILDING)
req = self.req(self.path_with_id_v2 % uuid)
+ self.mock_get.side_effect = fakes.fake_compute_get(
+ uuid=uuid, vm_state=vm_states.BUILDING,
+ project_id=req.environ['nova.context'].project_id)
res_dict = self.controller.show(req, uuid)
self.assertEqual({}, res_dict['server']['addresses'])
@@ -630,9 +656,10 @@ class ServersControllerTest(ControllerTest):
],
},
}
- self.mock_get.side_effect = fakes.fake_compute_get(
- nw_cache=nw_cache, uuid=uuid, vm_state=vm_states.ACTIVE)
req = self.req(self.path_with_id_v2 % uuid)
+ self.mock_get.side_effect = fakes.fake_compute_get(
+ nw_cache=nw_cache, uuid=uuid, vm_state=vm_states.ACTIVE,
+ project_id=req.environ['nova.context'].project_id)
res_dict = self.controller.show(req, uuid)
self.assertThat(res_dict['server']['addresses'],
matchers.DictMatches(expected['addresses']))
@@ -1635,11 +1662,12 @@ class ServersControllerTest(ControllerTest):
def test_show_server_usage(self):
DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
- self.mock_get.side_effect = fakes.fake_compute_get(
- id=1, uuid=FAKE_UUID, launched_at=DATE1, terminated_at=DATE2)
req = self.req(self.path_with_id % FAKE_UUID)
req.accept = 'application/json'
req.method = 'GET'
+ self.mock_get.side_effect = fakes.fake_compute_get(
+ id=1, uuid=FAKE_UUID, launched_at=DATE1, terminated_at=DATE2,
+ project_id=req.environ['nova.context'].project_id)
res = req.get_response(compute.APIRouterV21())
self.assertEqual(res.status_int, 200)
self.useFixture(utils_fixture.TimeFixture())
@@ -1770,6 +1798,8 @@ class ServersControllerTestV23(ServersControllerTest):
def setUp(self):
super(ServersControllerTestV23, self).setUp()
+ self.request = self.req(self.path_with_id % FAKE_UUID)
+ self.project_id = self.request.environ['nova.context'].project_id
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID,
node="node-fake",
@@ -1784,7 +1814,8 @@ class ServersControllerTestV23(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=self.project_id)
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
@@ -1809,14 +1840,14 @@ class ServersControllerTestV23(ServersControllerTest):
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
+ server_dict['server']["tenant_id"] = self.project_id
return server_dict
def test_show(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
- req = self.req(self.path_with_id % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
+ res_dict = self.controller.show(self.request, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
@@ -1844,14 +1875,16 @@ class ServersControllerTestV23(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=context.project_id)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
self.mock_get_all.side_effect = None
- self.mock_get_all.return_value = fake_get_all(context)
-
req = self.req(self.path_detail)
+ self.mock_get_all.return_value = fake_get_all(
+ req.environ['nova.context'])
+
servers_list = self.controller.detail(req)
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
@@ -1882,7 +1915,8 @@ class ServersControllerTestV29(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=self.request.environ['nova.context'].project_id)
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
@@ -1908,11 +1942,15 @@ class ServersControllerTestV29(ServersControllerTest):
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
+ server_dict['server']["tenant_id"] = self.request.environ[
+ 'nova.context'].project_id
return server_dict
def _test_get_server_with_lock(self, locked_by):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
+ req = self.req(self.path_with_id % FAKE_UUID)
+ project_id = req.environ['nova.context'].project_id
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, locked_by=locked_by, uuid=FAKE_UUID,
node="node-fake",
@@ -1927,9 +1965,9 @@ class ServersControllerTestV29(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=project_id)
- req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
@@ -1937,6 +1975,7 @@ class ServersControllerTestV29(ServersControllerTest):
flavor_bookmark,
progress=0)
expected_server['server']['locked'] = True if locked_by else False
+ expected_server['server']['tenant_id'] = project_id
self.assertThat(res_dict, matchers.DictMatches(expected_server))
return res_dict
@@ -2032,7 +2071,8 @@ class ServersControllerTestV216(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=self.request.environ['nova.context'].project_id)
self.mock_get_instance_host_status = self.useFixture(
fixtures.MockPatchObject(
compute_api.API, 'get_instance_host_status',
@@ -2066,6 +2106,8 @@ class ServersControllerTestV216(ServersControllerTest):
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
+ server_dict['server']['tenant_id'] = self.request.environ[
+ 'nova.context'].project_id
return server_dict
@@ -2087,14 +2129,14 @@ class ServersControllerTestV216(ServersControllerTest):
def test_show(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
- req = self.req(self.path_with_id % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
+ res_dict = self.controller.show(self.request, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
- func = functools.partial(self.controller.show, req, FAKE_UUID)
+ func = functools.partial(self.controller.show, self.request,
+ FAKE_UUID)
self._verify_host_status_policy_behavior(func)
def test_detail(self):
@@ -2118,14 +2160,16 @@ class ServersControllerTestV216(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=context.project_id)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
self.mock_get_all.side_effect = None
- self.mock_get_all.return_value = fake_get_all(context)
-
req = self.req(self.path_detail)
+ self.mock_get_all.return_value = fake_get_all(
+ req.environ['nova.context'])
+
servers_list = self.controller.detail(req)
self.assertEqual(2, len(servers_list['servers']))
image_bookmark = "http://localhost/%s/images/10" % self.project_id
@@ -2163,7 +2207,8 @@ class ServersControllerTestV219(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=self.request.environ['nova.context'].project_id)
self.useFixture(fixtures.MockPatchObject(
compute_api.API, 'get_instance_host_status',
return_value='UP')).mock
@@ -2200,6 +2245,8 @@ class ServersControllerTestV219(ServersControllerTest):
def _test_get_server_with_description(self, description):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
+ req = self.req(self.path_with_id % FAKE_UUID)
+ project_id = req.environ['nova.context'].project_id
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, display_description=description, uuid=FAKE_UUID,
node="node-fake",
@@ -2214,9 +2261,9 @@ class ServersControllerTestV219(ServersControllerTest):
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
- power_state=1)
+ power_state=1,
+ project_id=project_id)
- req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
@@ -2224,6 +2271,7 @@ class ServersControllerTestV219(ServersControllerTest):
flavor_bookmark,
progress=0,
description=description)
+ expected_server['server']['tenant_id'] = project_id
self.assertThat(res_dict, matchers.DictMatches(expected_server))
return res_dict
@@ -2265,7 +2313,8 @@ class ServersControllerTestV226(ControllerTest):
def fake_get(*args, **kwargs):
self.assertIn('tags', kwargs['expected_attrs'])
fake_server = fakes.stub_instance_obj(
- ctxt, id=2, vm_state=vm_states.ACTIVE, progress=100)
+ ctxt, id=2, vm_state=vm_states.ACTIVE, progress=100,
+ project_id=ctxt.project_id)
tag_list = objects.TagList(objects=[
objects.Tag(resource_id=FAKE_UUID, tag=tag)
@@ -2512,6 +2561,10 @@ class ServersControllerTestV271(ControllerTest):
def test_show_server_group_not_exist(self):
req = self.req(self.path_with_id % FAKE_UUID)
+ return_server = fakes.fake_compute_get(
+ id=2, vm_state=vm_states.ACTIVE,
+ project_id=req.environ['nova.context'].project_id)
+ self.mock_get.side_effect = return_server
servers = self.controller.show(req, FAKE_UUID)
expect_sg = []
self.assertEqual(expect_sg, servers['server']['server_groups'])
@@ -4040,11 +4093,13 @@ class ServerStatusTest(test.TestCase):
self.controller = servers.ServersController()
def _get_with_state(self, vm_state, task_state=None):
+ request = fakes.HTTPRequestV21.blank(self.path_with_id % FAKE_UUID)
self.stub_out('nova.compute.api.API.get',
- fakes.fake_compute_get(vm_state=vm_state,
- task_state=task_state))
+ fakes.fake_compute_get(
+ vm_state=vm_state,
+ task_state=task_state,
+ project_id=request.environ['nova.context'].project_id))
- request = fakes.HTTPRequestV21.blank(self.path_with_id % FAKE_UUID)
return self.controller.show(request, FAKE_UUID)
def test_active(self):
@@ -4065,6 +4120,11 @@ class ServerStatusTest(test.TestCase):
rule = {'compute:reboot': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
+ self.stub_out('nova.compute.api.API.get',
+ fakes.fake_compute_get(
+ vm_state='ACTIVE',
+ task_state=None,
+ project_id=req.environ['nova.context'].project_id))
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_reboot, req, '1234',
body={'reboot': {'type': 'HARD'}})
@@ -4087,6 +4147,12 @@ class ServerStatusTest(test.TestCase):
rule = {'compute:confirm_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
+ self.stub_out('nova.compute.api.API.get',
+ fakes.fake_compute_get(
+ vm_state='ACTIVE',
+ task_state=None,
+ project_id=req.environ['nova.context'].project_id))
+
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_confirm_resize, req, '1234', {})
@@ -4103,6 +4169,12 @@ class ServerStatusTest(test.TestCase):
rule = {'compute:revert_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
+ self.stub_out('nova.compute.api.API.get',
+ fakes.fake_compute_get(
+ vm_state='ACTIVE',
+ task_state=None,
+ project_id=req.environ['nova.context'].project_id))
+
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_revert_resize, req, '1234', {})
@@ -4685,7 +4757,7 @@ class ServersControllerCreateTest(test.TestCase):
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
- @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_with_bdms_and_no_image(
self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_vols):
mock_bdm_image_metadata.return_value = {}
@@ -4706,11 +4778,11 @@ class ServersControllerCreateTest(test.TestCase):
mock_validate_bdm.assert_called_once()
mock_bdm_image_metadata.assert_called_once_with(
- mock.ANY, mock.ANY, False)
+ mock.ANY, mock.ANY, mock.ANY, mock.ANY, False)
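+        # The two extra mock.ANY arguments account for the image_api and
+        # volume_api parameters taken by the relocated
+        # nova.utils.get_bdm_image_metadata() helper.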
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
- @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_with_bdms_and_empty_imageRef(
self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_volumes):
mock_bdm_image_metadata.return_value = {}
@@ -4971,7 +5043,7 @@ class ServersControllerCreateTest(test.TestCase):
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create, params, no_image=True)
- @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
params = {'block_device_mapping_v2': self.bdm_v2}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
@@ -5042,7 +5114,7 @@ class ServersControllerCreateTest(test.TestCase):
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params)
- @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_with_volumes_enabled_and_bdms_no_image(
self, mock_get_bdm_image_metadata):
"""Test that the create works if there is no image supplied but
@@ -5066,9 +5138,9 @@ class ServersControllerCreateTest(test.TestCase):
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params, no_image=True)
mock_get_bdm_image_metadata.assert_called_once_with(
- mock.ANY, self.bdm, True)
+ mock.ANY, mock.ANY, mock.ANY, self.bdm, True)
- @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_with_imageRef_as_empty_string(
self, mock_bdm_image_metadata):
volume = {
@@ -5111,7 +5183,7 @@ class ServersControllerCreateTest(test.TestCase):
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
- @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ @mock.patch('nova.utils.get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails_legacy_bdm(
self, fake_bdm_meta):
bdm = [{
@@ -8100,500 +8172,6 @@ class ServersInvalidRequestTestCase(test.TestCase):
self._invalid_server_create(body=body)
-class ServersPolicyEnforcementV21(test.NoDBTestCase):
-
- def setUp(self):
- super(ServersPolicyEnforcementV21, self).setUp()
- self.useFixture(nova_fixtures.AllServicesCurrent())
- self.controller = servers.ServersController()
- self.req = fakes.HTTPRequest.blank('')
- self.image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
-
- def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
- self.policy.set_rules(rules)
- exc = self.assertRaises(
- exception.PolicyNotAuthorized, func, *arg, **kwarg)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- @mock.patch.object(servers.ServersController, '_get_instance')
- def test_start_policy_failed(self, _get_instance_mock):
- _get_instance_mock.return_value = None
- rule_name = "os_compute_api:servers:start"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller._start_server,
- self.req, FAKE_UUID, body={})
-
- @mock.patch.object(servers.ServersController, '_get_instance')
- def test_trigger_crash_dump_policy_failed_with_other_project(
- self, _get_instance_mock):
- _get_instance_mock.return_value = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'])
- rule_name = "os_compute_api:servers:trigger_crash_dump"
- rule = {rule_name: "project_id:%(project_id)s"}
- self.req.api_version_request =\
- api_version_request.APIVersionRequest('2.17')
- # Change the project_id in request context.
- self.req.environ['nova.context'].project_id = 'other-project'
- self._common_policy_check(
- rule, rule_name, self.controller._action_trigger_crash_dump,
- self.req, FAKE_UUID, body={'trigger_crash_dump': None})
-
- @mock.patch('nova.compute.api.API.trigger_crash_dump')
- @mock.patch.object(servers.ServersController, '_get_instance')
- def test_trigger_crash_dump_overridden_policy_pass_with_same_project(
- self, _get_instance_mock, trigger_crash_dump_mock):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- project_id=self.req.environ['nova.context'].project_id)
- _get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:trigger_crash_dump"
- self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
- self.req.api_version_request = (
- api_version_request.APIVersionRequest('2.17'))
- self.controller._action_trigger_crash_dump(
- self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
- trigger_crash_dump_mock.assert_called_once_with(
- self.req.environ['nova.context'], instance)
-
- @mock.patch.object(servers.ServersController, '_get_instance')
- def test_trigger_crash_dump_overridden_policy_failed_with_other_user(
- self, _get_instance_mock):
- _get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:trigger_crash_dump"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- # Change the user_id in request context.
- self.req.environ['nova.context'].user_id = 'other-user'
- self.req.api_version_request = (
- api_version_request.APIVersionRequest('2.17'))
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_trigger_crash_dump,
- self.req,
- fakes.FAKE_UUID,
- body={'trigger_crash_dump': None})
- self.assertEqual(
- "Policy doesn't allow %s to be performed." % rule_name,
- exc.format_message())
-
- @mock.patch('nova.compute.api.API.trigger_crash_dump')
- @mock.patch.object(servers.ServersController, '_get_instance')
- def test_trigger_crash_dump_overridden_policy_pass_with_same_user(
- self, _get_instance_mock, trigger_crash_dump_mock):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- user_id=self.req.environ['nova.context'].user_id)
- _get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:trigger_crash_dump"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- self.req.api_version_request = (
- api_version_request.APIVersionRequest('2.17'))
- self.controller._action_trigger_crash_dump(
- self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
- trigger_crash_dump_mock.assert_called_once_with(
- self.req.environ['nova.context'], instance)
-
- def test_index_policy_failed(self):
- rule_name = "os_compute_api:servers:index"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller.index, self.req)
-
- def test_detail_policy_failed(self):
- rule_name = "os_compute_api:servers:detail"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller.detail, self.req)
-
- def test_detail_get_tenants_policy_failed(self):
- req = fakes.HTTPRequest.blank('')
- req.GET["all_tenants"] = "True"
- rule_name = "os_compute_api:servers:detail:get_all_tenants"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller._get_servers, req, True)
-
- def test_index_get_tenants_policy_failed(self):
- req = fakes.HTTPRequest.blank('')
- req.GET["all_tenants"] = "True"
- rule_name = "os_compute_api:servers:index:get_all_tenants"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller._get_servers, req, False)
-
- @mock.patch.object(common, 'get_instance')
- def test_show_policy_failed(self, get_instance_mock):
- get_instance_mock.return_value = None
- rule_name = "os_compute_api:servers:show"
- rule = {rule_name: "project:non_fake"}
- self._common_policy_check(
- rule, rule_name, self.controller.show, self.req, FAKE_UUID)
-
- @mock.patch.object(common, 'get_instance')
- def test_delete_policy_failed_with_other_project(self, get_instance_mock):
- get_instance_mock.return_value = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'])
- rule_name = "os_compute_api:servers:delete"
- rule = {rule_name: "project_id:%(project_id)s"}
- # Change the project_id in request context.
- self.req.environ['nova.context'].project_id = 'other-project'
- self._common_policy_check(
- rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
-
- @mock.patch('nova.compute.api.API.soft_delete')
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_delete_overridden_policy_pass_with_same_project(self,
- get_instance_mock,
- soft_delete_mock):
- self.flags(reclaim_instance_interval=3600)
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- project_id=self.req.environ['nova.context'].project_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:delete"
- self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
- self.controller.delete(self.req, fakes.FAKE_UUID)
- soft_delete_mock.assert_called_once_with(
- self.req.environ['nova.context'], instance)
-
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_delete_overridden_policy_failed_with_other_user_in_same_project(
- self, get_instance_mock):
- get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:delete"
- rule = {rule_name: "user_id:%(user_id)s"}
- # Change the user_id in request context.
- self.req.environ['nova.context'].user_id = 'other-user'
- self._common_policy_check(
- rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
-
- @mock.patch('nova.compute.api.API.soft_delete')
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_delete_overridden_policy_pass_with_same_user(self,
- get_instance_mock,
- soft_delete_mock):
- self.flags(reclaim_instance_interval=3600)
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- user_id=self.req.environ['nova.context'].user_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:delete"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- self.controller.delete(self.req, fakes.FAKE_UUID)
- soft_delete_mock.assert_called_once_with(
- self.req.environ['nova.context'], instance)
-
- @mock.patch.object(common, 'get_instance')
- def test_update_policy_failed_with_other_project(self, get_instance_mock):
- get_instance_mock.return_value = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'])
- rule_name = "os_compute_api:servers:update"
- rule = {rule_name: "project_id:%(project_id)s"}
- body = {'server': {'name': 'server_test'}}
- # Change the project_id in request context.
- self.req.environ['nova.context'].project_id = 'other-project'
- self._common_policy_check(
- rule, rule_name, self.controller.update, self.req,
- FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
- @mock.patch.object(compute_api.API, 'update_instance')
- @mock.patch.object(common, 'get_instance')
- def test_update_overridden_policy_pass_with_same_project(
- self, get_instance_mock, update_instance_mock, view_show_mock):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- project_id=self.req.environ['nova.context'].project_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:update"
- self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
- body = {'server': {'name': 'server_test'}}
- self.controller.update(self.req, fakes.FAKE_UUID, body=body)
-
- @mock.patch.object(common, 'get_instance')
- def test_update_overridden_policy_failed_with_other_user_in_same_project(
- self, get_instance_mock):
- get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:update"
- rule = {rule_name: "user_id:%(user_id)s"}
- # Change the user_id in request context.
- self.req.environ['nova.context'].user_id = 'other-user'
- body = {'server': {'name': 'server_test'}}
- self._common_policy_check(
- rule, rule_name, self.controller.update, self.req,
- FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.common.'
- 'instance_has_port_with_resource_request', return_value=False)
- @mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
- @mock.patch.object(compute_api.API, 'update_instance')
- @mock.patch.object(common, 'get_instance')
- def test_update_overridden_policy_pass_with_same_user(self,
- get_instance_mock,
- update_instance_mock,
- view_show_mock,
- mock_port_check):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- user_id=self.req.environ['nova.context'].user_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:update"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- body = {'server': {'name': 'server_test'}}
- self.controller.update(self.req, fakes.FAKE_UUID, body=body)
-
- def test_confirm_resize_policy_failed(self):
- rule_name = "os_compute_api:servers:confirm_resize"
- rule = {rule_name: "project:non_fake"}
- body = {'server': {'name': 'server_test'}}
- self._common_policy_check(
- rule, rule_name, self.controller._action_confirm_resize,
- self.req, FAKE_UUID, body=body)
-
- def test_revert_resize_policy_failed(self):
- rule_name = "os_compute_api:servers:revert_resize"
- rule = {rule_name: "project:non_fake"}
- body = {'server': {'name': 'server_test'}}
- self._common_policy_check(
- rule, rule_name, self.controller._action_revert_resize,
- self.req, FAKE_UUID, body=body)
-
- def test_reboot_policy_failed(self):
- rule_name = "os_compute_api:servers:reboot"
- rule = {rule_name: "project:non_fake"}
- body = {'reboot': {'type': 'HARD'}}
- self._common_policy_check(
- rule, rule_name, self.controller._action_reboot,
- self.req, FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_resize_policy_failed_with_other_project(self, get_instance_mock):
- get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:resize"
- rule = {rule_name: "project_id:%(project_id)s"}
- body = {'resize': {'flavorRef': '1'}}
- # Change the project_id in request context.
- self.req.environ['nova.context'].project_id = 'other-project'
- self._common_policy_check(
- rule, rule_name, self.controller._action_resize, self.req,
- FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.common.'
- 'instance_has_port_with_resource_request', return_value=False)
- @mock.patch('nova.compute.api.API.resize')
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_resize_overridden_policy_pass_with_same_project(self,
- get_instance_mock,
- resize_mock,
- mock_post_check):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- project_id=self.req.environ['nova.context'].project_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:resize"
- self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
- body = {'resize': {'flavorRef': '1'}}
- self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
- resize_mock.assert_called_once_with(self.req.environ['nova.context'],
- instance, '1',
- auto_disk_config=None)
-
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_resize_overridden_policy_failed_with_other_user_in_same_project(
- self, get_instance_mock):
- get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:resize"
- rule = {rule_name: "user_id:%(user_id)s"}
- # Change the user_id in request context.
- self.req.environ['nova.context'].user_id = 'other-user'
- body = {'resize': {'flavorRef': '1'}}
- self._common_policy_check(
- rule, rule_name, self.controller._action_resize, self.req,
- FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.common.'
- 'instance_has_port_with_resource_request', return_value=False)
- @mock.patch('nova.compute.api.API.resize')
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_resize_overridden_policy_pass_with_same_user(self,
- get_instance_mock,
- resize_mock,
- mock_port_check):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- user_id=self.req.environ['nova.context'].user_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:resize"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- body = {'resize': {'flavorRef': '1'}}
- self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
- resize_mock.assert_called_once_with(self.req.environ['nova.context'],
- instance, '1',
- auto_disk_config=None)
-
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_rebuild_policy_failed_with_other_project(self, get_instance_mock):
- get_instance_mock.return_value = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- project_id=self.req.environ['nova.context'].project_id)
- rule_name = "os_compute_api:servers:rebuild"
- rule = {rule_name: "project_id:%(project_id)s"}
- body = {'rebuild': {'imageRef': self.image_uuid}}
- # Change the project_id in request context.
- self.req.environ['nova.context'].project_id = 'other-project'
- self._common_policy_check(
- rule, rule_name, self.controller._action_rebuild,
- self.req, FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_rebuild_overridden_policy_failed_with_other_user_in_same_project(
- self, get_instance_mock):
- get_instance_mock.return_value = (
- fake_instance.fake_instance_obj(self.req.environ['nova.context']))
- rule_name = "os_compute_api:servers:rebuild"
- rule = {rule_name: "user_id:%(user_id)s"}
- body = {'rebuild': {'imageRef': self.image_uuid}}
- # Change the user_id in request context.
- self.req.environ['nova.context'].user_id = 'other-user'
- self._common_policy_check(
- rule, rule_name, self.controller._action_rebuild,
- self.req, FAKE_UUID, body=body)
-
- @mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
- @mock.patch('nova.compute.api.API.rebuild')
- @mock.patch('nova.api.openstack.common.get_instance')
- def test_rebuild_overridden_policy_pass_with_same_user(self,
- get_instance_mock,
- rebuild_mock,
- view_show_mock):
- instance = fake_instance.fake_instance_obj(
- self.req.environ['nova.context'],
- user_id=self.req.environ['nova.context'].user_id)
- get_instance_mock.return_value = instance
- rule_name = "os_compute_api:servers:rebuild"
- self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
- body = {'rebuild': {'imageRef': self.image_uuid,
- 'adminPass': 'dumpy_password'}}
- self.controller._action_rebuild(self.req, fakes.FAKE_UUID, body=body)
- rebuild_mock.assert_called_once_with(self.req.environ['nova.context'],
- instance,
- self.image_uuid,
- 'dumpy_password')
-
- def test_create_image_policy_failed(self):
- rule_name = "os_compute_api:servers:create_image"
- rule = {rule_name: "project:non_fake"}
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
- self._common_policy_check(
- rule, rule_name, self.controller._action_create_image,
- self.req, FAKE_UUID, body=body)
-
- @mock.patch('nova.compute.utils.is_volume_backed_instance',
- return_value=True)
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch.object(servers.ServersController, '_get_server')
- def test_create_vol_backed_img_snapshotting_policy_blocks_project(self,
- mock_get_server,
- mock_get_uuidi,
- mock_is_vol_back):
- """Don't permit a snapshot of a volume backed instance if configured
- not to based on project
- """
- rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
- rules = {
- rule_name: "project:non_fake",
- "os_compute_api:servers:create_image": "",
- }
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
- self._common_policy_check(
- rules, rule_name, self.controller._action_create_image,
- self.req, FAKE_UUID, body=body)
-
- @mock.patch('nova.compute.utils.is_volume_backed_instance',
- return_value=True)
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch.object(servers.ServersController, '_get_server')
- def test_create_vol_backed_img_snapshotting_policy_blocks_role(self,
- mock_get_server,
- mock_get_uuidi,
- mock_is_vol_back):
- """Don't permit a snapshot of a volume backed instance if configured
- not to based on role
- """
- rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
- rules = {
- rule_name: "role:non_fake",
- "os_compute_api:servers:create_image": "",
- }
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
- self._common_policy_check(
- rules, rule_name, self.controller._action_create_image,
- self.req, FAKE_UUID, body=body)
-
- def _create_policy_check(self, rules, rule_name):
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': self.image_uuid,
- 'flavorRef': flavor_ref,
- 'availability_zone': "zone1:host1:node1",
- 'block_device_mapping': [{'device_name': "/dev/sda1"}],
- 'networks': [{'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
- self._common_policy_check(
- rules, rule_name, self.controller.create, self.req, body=body)
-
- def test_create_policy_failed(self):
- rule_name = "os_compute_api:servers:create"
- rules = {rule_name: "project:non_fake"}
- self._create_policy_check(rules, rule_name)
-
- def test_create_forced_host_policy_failed(self):
- rule_name = "os_compute_api:servers:create:forced_host"
- rule = {"os_compute_api:servers:create": "@",
- rule_name: "project:non_fake"}
- self._create_policy_check(rule, rule_name)
-
- def test_create_attach_volume_policy_failed(self):
- rule_name = "os_compute_api:servers:create:attach_volume"
- rules = {"os_compute_api:servers:create": "@",
- "os_compute_api:servers:create:forced_host": "@",
- rule_name: "project:non_fake"}
- self._create_policy_check(rules, rule_name)
-
- def test_create_attach_attach_network_policy_failed(self):
- rule_name = "os_compute_api:servers:create:attach_network"
- rules = {"os_compute_api:servers:create": "@",
- "os_compute_api:servers:create:forced_host": "@",
- "os_compute_api:servers:create:attach_volume": "@",
- rule_name: "project:non_fake"}
- self._create_policy_check(rules, rule_name)
-
-
class ServersActionsJsonTestV239(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/api/validation/extra_specs/test_validators.py b/nova/tests/unit/api/validation/extra_specs/test_validators.py
index 2ec5a43040..2670c5c5ac 100644
--- a/nova/tests/unit/api/validation/extra_specs/test_validators.py
+++ b/nova/tests/unit/api/validation/extra_specs/test_validators.py
@@ -29,7 +29,8 @@ class TestValidators(test.NoDBTestCase):
namespaces = {
'accel', 'aggregate_instance_extra_specs', 'capabilities', 'hw',
'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'powervm', 'quota',
- 'resources', 'trait', 'vmware',
+ 'resources(?P<group>([a-zA-Z0-9_-]{1,64})?)',
+ 'trait(?P<group>([a-zA-Z0-9_-]{1,64})?)', 'vmware',
}
self.assertTrue(
namespaces.issubset(validators.NAMESPACES),
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 8ac5459f5f..1db1bff105 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -250,11 +250,17 @@ class BaseTestCase(test.TestCase):
# Just to make long lines short
self.rt = self.compute.rt
- self.mock_get_allocs = self.useFixture(
+ self.mock_get_allocations = self.useFixture(
fixtures.fixtures.MockPatch(
'nova.scheduler.client.report.SchedulerReportClient.'
'get_allocations_for_consumer')).mock
- self.mock_get_allocs.return_value = {}
+ self.mock_get_allocations.return_value = {}
+
+ self.mock_get_allocs = self.useFixture(
+ fixtures.fixtures.MockPatch(
+ 'nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocs_for_consumer')).mock
+ self.mock_get_allocs.return_value = {'allocations': {}}
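+        # Unlike get_allocations_for_consumer above, get_allocs_for_consumer
+        # returns the raw placement response body, hence the nested
+        # 'allocations' key in this stub.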
def tearDown(self):
ctxt = context.get_admin_context()
@@ -681,8 +687,9 @@ class ComputeVolumeTestCase(BaseTestCase):
'delete_on_termination': False,
}]
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
+ image_meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping)
if metadata:
self.assertEqual(image_meta['properties']['vol_test_key'],
'vol_test_value')
@@ -701,8 +708,10 @@ class ComputeVolumeTestCase(BaseTestCase):
'delete_on_termination': False,
}]
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping, legacy_bdm=False)
+ image_meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping,
+ legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['properties']['vol_test_key'],
'vol_test_value')
@@ -734,8 +743,10 @@ class ComputeVolumeTestCase(BaseTestCase):
'delete_on_termination': True,
}]
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping, legacy_bdm=False)
+ image_meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping,
+ legacy_bdm=False)
if metadata:
self.assertEqual('img_test_value',
@@ -4928,7 +4939,7 @@ class ComputeTestCase(BaseTestCase,
# which makes this a resize
mock_virt_mig.assert_called_once_with(self.context, migration,
instance, disk_info, 'fake-nwinfo1',
- test.MatchType(objects.ImageMeta), resize_instance,
+ test.MatchType(objects.ImageMeta), resize_instance, mock.ANY,
'fake-bdminfo', power_on)
mock_get_blk.assert_called_once_with(self.context, instance,
refresh_conn_info=True,
@@ -4943,6 +4954,8 @@ class ComputeTestCase(BaseTestCase,
mock_get_vol_connector.return_value, '/dev/vdb')
mock_attachment_complete.assert_called_once_with(
self.context, uuids.attachment_id)
+ self.mock_get_allocs.assert_called_once_with(self.context,
+ instance.uuid)
def test_finish_resize_from_active(self):
self._test_finish_resize(power_on=True)
@@ -12763,7 +12776,8 @@ class EvacuateHostTestCase(BaseTestCase):
mock_setup_instance_network_on_host.assert_called_once_with(
ctxt, self.inst, self.inst.host, migration,
provider_mappings=mock.sentinel.mapping)
- self.mock_get_allocs.assert_called_once_with(ctxt, self.inst.uuid)
+ self.mock_get_allocations.assert_called_once_with(ctxt,
+ self.inst.uuid)
mock_update_pci_req.assert_called_once_with(
ctxt, self.compute.reportclient, self.inst,
@@ -13109,8 +13123,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
admin_password, allocations, nw_info, block_device_info,
db_api=None):
self.assertEqual(self.expected, injected_files)
- self.assertEqual(self.mock_get_allocs.return_value, allocations)
- self.mock_get_allocs.assert_called_once_with(instance.uuid)
+ self.assertEqual(self.mock_get_allocations.return_value, allocations)
+ self.mock_get_allocations.assert_called_once_with(instance.uuid)
def _test(self, injected_files, decoded_files):
self.expected = decoded_files
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index 021295feb5..c3716e08ed 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -20,6 +20,7 @@ import ddt
import fixtures
import iso8601
import mock
+import os_traits as ot
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
@@ -3515,11 +3516,14 @@ class _ComputeAPIUnitTestMixIn(object):
side_effect=get_vol_data):
if not is_bootable:
self.assertRaises(exception.InvalidBDMVolumeNotBootable,
- self.compute_api._get_bdm_image_metadata,
- self.context, block_device_mapping)
+ utils.get_bdm_image_metadata,
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api,
+ block_device_mapping)
else:
- meta = self.compute_api._get_bdm_image_metadata(self.context,
- block_device_mapping)
+ meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping)
self.assertEqual(expected_meta, meta)
def test_boot_volume_non_bootable(self):
@@ -3542,8 +3546,9 @@ class _ComputeAPIUnitTestMixIn(object):
{"min_ram": 256, "min_disk": 128, "foo": "bar"}}
with mock.patch.object(self.compute_api.volume_api, 'get',
return_value=fake_volume):
- meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
+ meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
@@ -3568,8 +3573,9 @@ class _ComputeAPIUnitTestMixIn(object):
mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
return_value=fake_snapshot)) as (
volume_get, volume_get_snapshot):
- meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
+ meta = utils.get_bdm_image_metadata(
+ self.context, self.compute_api.image_api,
+ self.compute_api.volume_api, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
@@ -4442,8 +4448,10 @@ class _ComputeAPIUnitTestMixIn(object):
'device_name': 'vda',
}))]
self.assertRaises(exception.CinderConnectionFailed,
- self.compute_api._get_bdm_image_metadata,
+ utils.get_bdm_image_metadata,
self.context,
+ self.compute_api.image_api,
+ self.compute_api.volume_api,
bdms, legacy_bdm=True)
def test_get_volumes_for_bdms_errors(self):
@@ -5237,6 +5245,162 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi_unrescue_instance.assert_called_once_with(
self.context, instance=instance)
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_trait(self, mock_get_bdms,
+ mock_is_volume_backed,
+ mock_get_cn):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, bdms and volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+ # Assert that the instance task state as set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
+ mock_is_volume_backed,
+ mock_get_cn):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned compute node, bdms and volume
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is not returned, denying BFV rescue
+ mock_trait_info = mock.Mock(traits=[])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Assert that any attempt to rescue a bfv instance on a compute
+ # node that does not report the COMPUTE_RESCUE_BFV trait fails and
+ # raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(
+ self.context, uuids.cn)
+
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_allow_flag(self, mock_get_bdms,
+ mock_is_volume_backed):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms and volume
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+
+ # Assert that any attempt to rescue a bfv instance with
+ # allow_bfv_rescue=False fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=False)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
def test_set_admin_password_invalid_state(self):
# Tests that InstanceInvalidState is raised when not ACTIVE.
instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
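
The three boot-from-volume rescue tests above all drive the same gate in
the compute API. A condensed sketch of that gate, for orientation only
(hypothetical: the real check lives in nova.compute.api.API.rescue with a
different signature and error text):

    import os_traits as ot

    from nova.compute import utils as compute_utils
    from nova import exception
    from nova import objects

    def bfv_rescue_allowed(context, placementclient, instance, bdms,
                           allow_bfv_rescue):
        """Raise InstanceNotRescuable unless this rescue may proceed."""
        if not compute_utils.is_volume_backed_instance(
                context, instance, bdms):
            # Image-backed instances need no extra checks.
            return
        if allow_bfv_rescue:
            cn = objects.ComputeNode.get_by_host_and_nodename(
                context, instance.host, instance.node)
            traits = placementclient.get_provider_traits(
                context, cn.uuid).traits
            if ot.COMPUTE_RESCUE_BFV in traits:
                # The driver advertises stable-device BFV rescue support.
                return
        raise exception.InstanceNotRescuable(
            instance_id=instance.uuid,
            reason='driver does not support boot-from-volume rescue')
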
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index b8f2383796..c16349bbec 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -367,8 +367,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
test.MatchType(objects.ImageMeta), injected_files=[],
admin_password=None, allocations={}, network_info=[],
block_device_info='fake_bdm')
- self.mock_get_allocs.assert_called_once_with(self.context,
- instance.uuid)
+ self.mock_get_allocations.assert_called_once_with(self.context,
+ instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
self.assertNotIn('shelved_at', instance.system_metadata)
@@ -470,8 +470,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
- self.mock_get_allocs.assert_called_once_with(self.context,
- instance.uuid)
+ self.mock_get_allocations.assert_called_once_with(self.context,
+ instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
diff --git a/nova/tests/unit/fake_policy.py b/nova/tests/unit/fake_policy.py
index abea3bdcc7..1af4fe2cf0 100644
--- a/nova/tests/unit/fake_policy.py
+++ b/nova/tests/unit/fake_policy.py
@@ -17,7 +17,32 @@ policy_data = """
{
"context_is_admin": "role:admin or role:administrator",
+ "network:attach_external_network": "",
+ "os_compute_api:servers:create": "",
+ "os_compute_api:servers:create:attach_volume": "",
+ "os_compute_api:servers:create:attach_network": "",
+ "os_compute_api:servers:create:forced_host": "",
+ "os_compute_api:servers:create:trusted_certs": "",
+ "os_compute_api:servers:create_image": "",
+ "os_compute_api:servers:create_image:allow_volume_backed": "",
+ "os_compute_api:servers:update": "",
+ "os_compute_api:servers:index": "",
+ "os_compute_api:servers:index:get_all_tenants": "",
+ "os_compute_api:servers:delete": "",
+ "os_compute_api:servers:detail": "",
+ "os_compute_api:servers:detail:get_all_tenants": "",
+ "os_compute_api:servers:show": "",
+ "os_compute_api:servers:rebuild": "",
+ "os_compute_api:servers:rebuild:trusted_certs": "",
+ "os_compute_api:servers:reboot": "",
+ "os_compute_api:servers:resize": "",
+ "os_compute_api:servers:revert_resize": "",
+ "os_compute_api:servers:confirm_resize": "",
+ "os_compute_api:servers:start": "",
+ "os_compute_api:servers:stop": "",
+ "os_compute_api:servers:trigger_crash_dump": "",
"os_compute_api:servers:show:host_status": "",
+ "os_compute_api:servers:show": "",
"os_compute_api:servers:show:host_status:unknown-only": "",
"os_compute_api:servers:allow_all_filters": "",
"os_compute_api:servers:migrations:force_complete": "",
@@ -32,6 +57,15 @@ policy_data = """
"os_compute_api:os-agents:create": "",
"os_compute_api:os-agents:update": "",
"os_compute_api:os-agents:delete": "",
+ "os_compute_api:os-aggregates:set_metadata": "",
+ "os_compute_api:os-aggregates:remove_host": "",
+ "os_compute_api:os-aggregates:add_host": "",
+ "os_compute_api:os-aggregates:create": "",
+ "os_compute_api:os-aggregates:index": "",
+ "os_compute_api:os-aggregates:update": "",
+ "os_compute_api:os-aggregates:delete": "",
+ "os_compute_api:os-aggregates:show": "",
+ "compute:aggregates:images": "",
"os_compute_api:os-attach-interfaces:list": "",
"os_compute_api:os-attach-interfaces:show": "",
"os_compute_api:os-attach-interfaces:create": "",
@@ -47,9 +81,13 @@ policy_data = """
"os_compute_api:ips:index": "",
"os_compute_api:ips:show": "",
"os_compute_api:extensions": "",
+ "os_compute_api:os-evacuate": "",
"os_compute_api:os-flavor-access:remove_tenant_access": "",
"os_compute_api:os-flavor-access:add_tenant_access": "",
"os_compute_api:os-flavor-access": "",
+ "os_compute_api:os-flavor-extra-specs:create": "",
+ "os_compute_api:os-flavor-extra-specs:update": "",
+ "os_compute_api:os-flavor-extra-specs:delete": "",
"os_compute_api:os-flavor-extra-specs:index": "",
"os_compute_api:os-flavor-extra-specs:show": "",
"os_compute_api:os-flavor-manage:create": "",
@@ -63,6 +101,10 @@ policy_data = """
"os_compute_api:os-instance-actions:events:details": "",
"os_compute_api:os-instance-usage-audit-log:list": "",
"os_compute_api:os-instance-usage-audit-log:show": "",
+ "os_compute_api:os-keypairs:index": "",
+ "os_compute_api:os-keypairs:create": "",
+ "os_compute_api:os-keypairs:show": "",
+ "os_compute_api:os-keypairs:delete": "",
"os_compute_api:os-hypervisors:list": "",
"os_compute_api:os-hypervisors:list-detail": "",
"os_compute_api:os-hypervisors:statistics": "",
@@ -96,6 +138,7 @@ policy_data = """
"os_compute_api:os-server-diagnostics": "",
"os_compute_api:os-server-password:show": "",
"os_compute_api:os-server-password:clear": "",
+ "os_compute_api:os-server-external-events:create": "",
"os_compute_api:os-server-tags:index": "",
"os_compute_api:os-server-tags:show": "",
"os_compute_api:os-server-tags:update": "",
@@ -104,6 +147,7 @@ policy_data = """
"os_compute_api:os-server-tags:delete_all": "",
"os_compute_api:os-server-groups:show": "",
"os_compute_api:os-server-groups:index": "",
+ "os_compute_api:os-server-groups:index:all_projects": "",
"os_compute_api:os-server-groups:create": "",
"os_compute_api:os-server-groups:delete": "",
"os_compute_api:os-services:list": "",
@@ -133,6 +177,8 @@ policy_data = """
"os_compute_api:server-metadata:update_all": "",
"os_compute_api:server-metadata:delete": "",
"os_compute_api:server-metadata:show": "",
- "os_compute_api:server-metadata:index": ""
+ "os_compute_api:server-metadata:index": "",
+ "compute:server:topology:index": "",
+ "compute:server:topology:host:index": "is_admin:True"
}
"""
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 92547abdc4..3e6dc6417b 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -1461,6 +1461,8 @@ class TestAPI(TestAPIBase):
"""Only one network is available, it's external, and the client
is unauthorized to use it.
"""
+ rules = {'network:attach_external_network': 'is_admin:True'}
+ policy.set_rules(oslo_policy.Rules.from_dict(rules))
mocked_client = mock.create_autospec(client.Client)
mock_get_client.return_value = mocked_client
self.instance = fake_instance.fake_instance_obj(self.context,
@@ -7025,6 +7027,8 @@ class TestAllocateForInstance(test.NoDBTestCase):
requested_networks, ordered_networks)
def test_validate_requested_network_ids_raises_forbidden(self):
+ rules = {'network:attach_external_network': 'is_admin:True'}
+ policy.set_rules(oslo_policy.Rules.from_dict(rules))
self._assert_validate_requested_network_ids_raises(
exception.ExternalNetworkAttachForbidden,
[{'id': "net1", 'router:external': True, 'shared': False}])
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index e9ae7b142c..f1f8350c4b 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -114,6 +114,8 @@ class BasePolicyTest(test.TestCase):
"role:admin and system_scope:all",
"system_reader_api":
"role:reader and system_scope:all",
+ "project_member_api":
+ "role:member and project_id:%(project_id)s",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
@@ -147,9 +149,15 @@ class BasePolicyTest(test.TestCase):
def ensure_raises(req, *args, **kwargs):
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, req, *arg, **kwarg)
- self.assertEqual(
- "Policy doesn't allow %s to be performed." %
- rule_name, exc.format_message())
+ # NOTE(gmann): In case of multi-policy APIs, the PolicyNotAuthorized
+ # exception can be raised by either of the policies, so asserting on
+ # the error message, which includes the rule name, can mismatch.
+ # Tests verifying multiple policies can pass rule_name as None to
+ # skip the error message assert.
+ if rule_name is not None:
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." %
+ rule_name, exc.format_message())
# Verify all the context having allowed scope and roles pass
# the policy check.
for context in authorized_contexts:
@@ -171,8 +179,26 @@ class BasePolicyTest(test.TestCase):
args1 = copy.deepcopy(arg)
kwargs1 = copy.deepcopy(kwarg)
if not fatal:
- unauthorize_response.append(
- ensure_return(req, *args1, **kwargs1))
+ try:
+ unauthorize_response.append(
+ ensure_return(req, *args1, **kwargs1))
+ # NOTE(gmann): We need to ignore the PolicyNotAuthorized
+ # exception here so that we can add the correct response
+ # in unauthorize_response for the case of fatal=False.
+ # This handles the case of multi-policy checks where tests
+ # verify the second policy via the response of fatal=False
+ # and ignore the response checks where the first policy
+ # itself fails to pass (even if tests override the first
+ # policy to allow it for everyone, scope checks can still
+ # lead to a PolicyNotAuthorized error).
+ # For example: the flavor extra specs policy for the GET
+ # flavor API. There, the flavor extra specs policy is
+ # checked after the GET flavor policy, so any context
+ # failing on GET flavor will raise PolicyNotAuthorized and
+ # we have no way to verify the flavor extra specs for it;
+ # such contexts are skipped in the test.
+ except exception.PolicyNotAuthorized:
+ continue
else:
ensure_raises(req, *args1, **kwargs1)
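
Taken together, the two relaxations above (skipping the message assert when
rule_name is None, and swallowing PolicyNotAuthorized when fatal=False) are
what the multi-policy tests elsewhere in this change rely on. A
representative call site, adapted from the flavor extra specs tests below
and passing None for rule_name as the new NOTE permits:

    req = fakes.HTTPRequest.blank('', version='2.61')
    authorize_res, unauthorize_res = self.common_policy_check(
        self.all_authorized_contexts,
        self.all_unauthorized_contexts,
        None,                     # rule_name=None skips the message assert
        self.flavor_ctrl.show, req, '1',
        fatal=False)              # collect responses instead of raising
    # Contexts failing a prerequisite policy are silently skipped, so only
    # the policy under test decides which list each response lands in.
    for resp in authorize_res:
        self.assertIn('extra_specs', resp['flavor'])
    for resp in unauthorize_res:
        self.assertNotIn('extra_specs', resp['flavor'])
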
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index bf0356031e..1bcd7faae0 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -98,6 +98,15 @@ class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
super(AdminActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+
+class AdminActionsNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
+ """Test Admin Actions APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(AdminActionsNoLegacyPolicyTest, self).setUp()
# Check that system admin is able to perform the system level actions
# on server.
self.admin_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_aggregates.py b/nova/tests/unit/policies/test_aggregates.py
index 2bcfa516c9..215320f10d 100644
--- a/nova/tests/unit/policies/test_aggregates.py
+++ b/nova/tests/unit/policies/test_aggregates.py
@@ -43,11 +43,23 @@ class AggregatesPolicyTest(base.BasePolicyTest):
self.project_foo_context, self.project_reader_context
]
+ # Check that system reader is able to get Aggregate
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-admin is not able to get Aggregate
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
+
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list')
def test_list_aggregate_policy(self, mock_list):
rule_name = "os_compute_api:os-aggregates:index"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -87,8 +99,8 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_show_aggregate_policy(self, mock_show):
rule_name = "os_compute_api:os-aggregates:show"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
rule_name, self.controller.show,
self.req, 1)
@@ -162,3 +174,14 @@ class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
+ # Check that system reader is able to get Aggregate
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-admin is not able to get Aggregate
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index ec891c949b..43b9634cdd 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -130,7 +130,16 @@ class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
super(EvacuateScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- self.user_req.environ['nova.context'].system_scope = 'all'
+
+class EvacuateNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
+ """Test Evacuate APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(EvacuateNoLegacyPolicyTest, self).setUp()
# Check that system admin is able to evacuate server.
self.admin_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_flavor_extra_specs.py b/nova/tests/unit/policies/test_flavor_extra_specs.py
new file mode 100644
index 0000000000..3129cb6213
--- /dev/null
+++ b/nova/tests/unit/policies/test_flavor_extra_specs.py
@@ -0,0 +1,414 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import flavor_manage
+from nova.api.openstack.compute import flavors
+from nova.api.openstack.compute import flavors_extraspecs
+from nova.api.openstack.compute import servers
+from nova.compute import vm_states
+from nova import objects
+from nova.policies import flavor_extra_specs as policies
+from nova.policies import flavor_manage as fm_policies
+from nova.policies import servers as s_policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_flavor
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
+ """Test Flavor Extra Specs APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(FlavorExtraSpecsPolicyTest, self).setUp()
+ self.controller = flavors_extraspecs.FlavorExtraSpecsController()
+ self.flavor_ctrl = flavors.FlavorsController()
+ self.fm_ctrl = flavor_manage.FlavorManageController()
+ self.server_ctrl = servers.ServersController()
+ self.req = fakes.HTTPRequest.blank('')
+ self.server_ctrl._view_builder._add_security_grps = mock.MagicMock()
+ self.server_ctrl._view_builder._get_metadata = mock.MagicMock()
+ self.server_ctrl._view_builder._get_addresses = mock.MagicMock()
+ self.server_ctrl._view_builder._get_host_id = mock.MagicMock()
+ self.server_ctrl._view_builder._get_fault = mock.MagicMock()
+ self.server_ctrl._view_builder._add_host_status = mock.MagicMock()
+
+ self.instance = fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, project_id=self.project_id,
+ vm_state=vm_states.ACTIVE)
+
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+ self.mock_get.return_value = self.instance
+
+ fakes.stub_out_secgroup_api(
+ self, security_groups=[{'name': 'default'}])
+ self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
+ self.server_ctrl.compute_api, 'get_all')).mock
+ self.mock_get_all.return_value = objects.InstanceList(
+ objects=[self.instance])
+
+ def get_flavor_extra_specs(context, flavor_id):
+ return fake_flavor.fake_flavor_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, project_id=self.project_id,
+ is_public=False, extra_specs={'hw:cpu_policy': 'shared'},
+ expected_attrs='extra_specs')
+
+ self.stub_out('nova.api.openstack.common.get_flavor',
+ get_flavor_extra_specs)
+
+ # Check that all are able to get flavor extra specs.
+ self.all_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ self.all_unauthorized_contexts = []
+ # Check that all system scoped are able to get flavor extra specs.
+ self.all_system_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ self.all_system_unauthorized_contexts = []
+
+ # Check that admin is able to create, update and delete flavor
+ # extra specs.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to create, update and
+ # delete flavor extra specs.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ @mock.patch('nova.objects.Flavor.save')
+ def test_create_flavor_extra_specs_policy(self, mock_save):
+ body = {'extra_specs': {'hw:numa_nodes': '1'}}
+ rule_name = policies.POLICY_ROOT % 'create'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, '1234',
+ body=body)
+
+ @mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
+ @mock.patch('nova.objects.Flavor.save')
+ def test_delete_flavor_extra_specs_policy(self, mock_save, mock_delete):
+ rule_name = policies.POLICY_ROOT % 'delete'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, '1234', 'hw:cpu_policy')
+
+ @mock.patch('nova.objects.Flavor.save')
+ def test_update_flavor_extra_specs_policy(self, mock_save):
+ body = {'hw:cpu_policy': 'shared'}
+ rule_name = policies.POLICY_ROOT % 'update'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, '1234', 'hw:cpu_policy',
+ body=body)
+
+ def test_show_flavor_extra_specs_policy(self):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.all_authorized_contexts,
+ self.all_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, '1234',
+ 'hw:cpu_policy')
+
+ def test_index_flavor_extra_specs_policy(self):
+ rule_name = policies.POLICY_ROOT % 'index'
+ self.common_policy_check(self.all_authorized_contexts,
+ self.all_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, '1234')
+
+ def test_flavor_detail_with_extra_specs_policy(self):
+ fakes.stub_out_flavor_get_all(self)
+ rule_name = policies.POLICY_ROOT % 'index'
+ req = fakes.HTTPRequest.blank('', version='2.61')
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts, self.all_unauthorized_contexts,
+ rule_name, self.flavor_ctrl.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['flavors'][0])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['flavors'][0])
+
+ def test_flavor_show_with_extra_specs_policy(self):
+ fakes.stub_out_flavor_get_by_flavor_id(self)
+ rule_name = policies.POLICY_ROOT % 'index'
+ req = fakes.HTTPRequest.blank('', version='2.61')
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts, self.all_unauthorized_contexts,
+ rule_name, self.flavor_ctrl.show, req, '1',
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['flavor'])
+
+ def test_flavor_create_with_extra_specs_policy(self):
+ rule_name = policies.POLICY_ROOT % 'index'
+ # 'create' policy is checked before flavor extra specs 'index' policy
+ # so we have to allow it for everyone otherwise it will fail first
+ # for unauthorized contexts.
+ rule = fm_policies.POLICY_ROOT % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.61')
+
+ def fake_create(newflavor):
+ newflavor['flavorid'] = uuids.fake_id
+ newflavor["name"] = 'test'
+ newflavor["memory_mb"] = 512
+ newflavor["vcpus"] = 2
+ newflavor["root_gb"] = 1
+ newflavor["ephemeral_gb"] = 1
+ newflavor["swap"] = 512
+ newflavor["rxtx_factor"] = 1.0
+ newflavor["is_public"] = True
+ newflavor["disabled"] = False
+ newflavor["extra_specs"] = {}
+
+ self.stub_out("nova.objects.Flavor.create", fake_create)
+ body = {
+ "flavor": {
+ "name": "test",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ }
+ }
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_system_authorized_contexts,
+ self.all_system_unauthorized_contexts,
+ rule_name, self.fm_ctrl._create, req, body=body,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['flavor'])
+
+ @mock.patch('nova.objects.Flavor.save')
+ def test_flavor_update_with_extra_specs_policy(self, mock_save):
+ fakes.stub_out_flavor_get_by_flavor_id(self)
+ rule_name = policies.POLICY_ROOT % 'index'
+ # 'update' policy is checked before flavor extra specs 'index' policy
+ # so we have to allow it for everyone otherwise it will fail first
+ # for unauthorized contexts.
+ rule = fm_policies.POLICY_ROOT % 'update'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.61')
+
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_system_authorized_contexts,
+ self.all_system_unauthorized_contexts,
+ rule_name, self.fm_ctrl._update, req, '1',
+ body={'flavor': {'description': None}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['flavor'])
+
+ def test_server_detail_with_extra_specs_policy(self):
+ rule = s_policies.SERVERS % 'detail'
+ # server 'detail' policy is checked before flavor extra specs 'index'
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.POLICY_ROOT % 'index'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts, self.all_unauthorized_contexts,
+ rule_name, self.server_ctrl.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['servers'][0]['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
+ rule = s_policies.SERVERS % 'show'
+ # server 'show' policy is checked before flavor extra specs 'index'
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.POLICY_ROOT % 'index'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts,
+ self.all_unauthorized_contexts,
+ rule_name, self.server_ctrl.show, req, 'fake',
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
+ mock_get, mock_bdm):
+ rule = s_policies.SERVERS % 'rebuild'
+ # server 'rebuild' policy is checked before flavor extra specs 'index'
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.POLICY_ROOT % 'index'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts,
+ self.all_unauthorized_contexts,
+ rule_name, self.server_ctrl._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp.obj['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
+
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_extra_specs_policy(self, mock_update):
+ rule = s_policies.SERVERS % 'update'
+ # server 'update' policy is checked before flavor extra specs 'index'
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.POLICY_ROOT % 'index'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.all_authorized_contexts,
+ self.all_unauthorized_contexts,
+ rule_name, self.server_ctrl.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
+
+class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
+ """Test Flavor Extra Specs APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to
+ True so that we can switch on scope checking on the oslo.policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that all system scoped are able to get flavor extra specs.
+ self.all_system_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context
+ ]
+ self.all_system_unauthorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that system admin is able to create, update and delete flavor
+ # extra specs.
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that non-system admin is not able to create, update and
+ # delete flavor extra specs.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+
+class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
+ """Test Flavor Extra Specs APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system_admin_or_owner APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(FlavorExtraSpecsNoLegacyPolicyTest, self).setUp()
+ # Check that system or project reader are able to get flavor
+ # extra specs.
+ self.all_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.system_member_context,
+ self.system_reader_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ self.all_unauthorized_contexts = [
+ self.project_foo_context, self.system_foo_context
+ ]
+ # Check that all system scoped reader are able to get flavor
+ # extra specs.
+ self.all_system_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context
+ ]
+ self.all_system_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
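
One recurring move in the new file above is worth isolating: when a
prerequisite policy is enforced before the policy under test, it is
overridden with "@" (oslo.policy's allow-everyone rule) so that every
context reaches the check actually being verified. The pattern, using the
same names as those tests:

    # Prerequisite policy, checked first by the API.
    rule = s_policies.SERVERS % 'detail'
    # Allow it for everyone without discarding the other rules.
    self.policy.set_rules({rule: "@"}, overwrite=False)
    # Policy actually under test.
    rule_name = policies.POLICY_ROOT % 'index'
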
diff --git a/nova/tests/unit/policies/test_hypervisors.py b/nova/tests/unit/policies/test_hypervisors.py
index abd5a50da1..4bd054ea26 100644
--- a/nova/tests/unit/policies/test_hypervisors.py
+++ b/nova/tests/unit/policies/test_hypervisors.py
@@ -40,7 +40,7 @@ class HypervisorsPolicyTest(base.BasePolicyTest):
# perform operations on hypervisors.
# NOTE(gmann): Until old default rule which is admin_api is
# deprecated and not removed, project admin and legacy admin
- # will be able to read the agent data. This make sure that existing
+ # will be able to get hypervisors. This makes sure that existing
# tokens will keep working even we have changed this policy defaults
# to reader role.
self.reader_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index 5102aa1578..43b8e2af6f 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -219,20 +219,6 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
super(InstanceActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get the
- # instance action events
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to
- # get the instance action events
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context
- ]
-
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
def test_show_instance_action_policy_with_show_details(
diff --git a/nova/tests/unit/policies/test_instance_usage_audit_log.py b/nova/tests/unit/policies/test_instance_usage_audit_log.py
index 5611644b76..3ab4e6138f 100644
--- a/nova/tests/unit/policies/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/policies/test_instance_usage_audit_log.py
@@ -38,9 +38,9 @@ class InstanceUsageAuditLogPolicyTest(base.BasePolicyTest):
# Check that admin is able to get instance usage audit log.
# NOTE(gmann): Until old default rule which is admin_api is
# deprecated and not removed, project admin and legacy admin
- # will be able to read the agent data. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
+ # will be able to get the instance usage audit log. This makes
+ # sure that existing tokens will keep working even though we have
+ # changed this policy's defaults to the reader role.
self.reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.system_member_context,
diff --git a/nova/tests/unit/policies/test_keypairs.py b/nova/tests/unit/policies/test_keypairs.py
new file mode 100644
index 0000000000..6b6166add6
--- /dev/null
+++ b/nova/tests/unit/policies/test_keypairs.py
@@ -0,0 +1,209 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from nova.policies import keypairs as policies
+
+from nova.api.openstack.compute import keypairs
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_keypair
+from nova.tests.unit.policies import base
+
+
+class KeypairsPolicyTest(base.BasePolicyTest):
+ """Test Keypairs APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(KeypairsPolicyTest, self).setUp()
+ self.controller = keypairs.KeypairController()
+ self.req = fakes.HTTPRequest.blank('')
+
+ # Check that everyone is able to create, delete and get
+ # their keypairs.
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = []
+
+ # Check that admin is able to create, delete and get
+ # other users keypairs.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to create, delete and get
+ # other users keypairs.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+
+ # Check that system reader is able to get
+ # other users keypairs.
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get
+ # other users keypairs.
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+
+ @mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
+ def test_index_keypairs_policy(self, mock_get):
+ rule_name = policies.POLICY_ROOT % 'index'
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
+
+ @mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
+ def test_index_others_keypairs_policy(self, mock_get):
+ req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
+ rule_name = policies.POLICY_ROOT % 'index'
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ req)
+
+ @mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
+ def test_show_keypairs_policy(self, mock_get):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, fakes.FAKE_UUID)
+
+ @mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
+ def test_show_others_keypairs_policy(self, mock_get):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ req, fakes.FAKE_UUID)
+
+ @mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
+ def test_create_keypairs_policy(self, mock_create):
+ rule_name = policies.POLICY_ROOT % 'create'
+ mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req,
+ body={'keypair': {'name': 'create_test'}})
+
+ @mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
+ def test_create_others_keypairs_policy(self, mock_create):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('', version='2.10')
+ rule_name = policies.POLICY_ROOT % 'create'
+ mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
+ body = {'keypair': {'name': 'test2', 'user_id': 'user2'}}
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.create,
+ req, body=body)
+
+ @mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
+ def test_delete_keypairs_policy(self, mock_delete):
+ rule_name = policies.POLICY_ROOT % 'delete'
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, fakes.FAKE_UUID)
+
+ @mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
+ def test_delete_others_keypairs_policy(self, mock_delete):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
+ rule_name = policies.POLICY_ROOT % 'delete'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ req, fakes.FAKE_UUID)
+
+
+class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
+ """Test Keypairs APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to
+ True so that we can switch on scope checking on the oslo.policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(KeypairsScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
+ """Test Keypairs APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(KeypairsNoLegacyPolicyTest, self).setUp()
+
+ # Check that system admin is able to create, delete and get
+ # other users keypairs.
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that system non-admin is not able to create, delete and get
+ # other users keypairs.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
+ # Check that system reader is able to get
+ # other users keypairs.
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get
+ # other users keypairs.
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index 8ff7667936..8760610002 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -66,7 +66,7 @@ class LimitsPolicyTest(base.BasePolicyTest):
# Check that system reader is able to get other projects limit.
# NOTE(gmann): Until old default rule which is admin_api is
# deprecated and not removed, project admin and legacy admin
- # will be able to read the agent data. This make sure that existing
+ # will be able to get limits. This makes sure that existing
# tokens will keep working even we have changed this policy defaults
# to reader role.
self.reader_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 4925214022..21274481b1 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -12,14 +12,14 @@
import fixtures
import mock
-from nova.policies import base as base_policy
-from nova.policies import lock_server as ls_policies
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import lock_server
from nova.compute import vm_states
from nova import exception
+from nova.policies import base as base_policy
+from nova.policies import lock_server as ls_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
@@ -49,7 +49,7 @@ class LockServerPolicyTest(base.BasePolicyTest):
self.mock_get.return_value = self.instance
# Check that admin or and server owner is able to lock/unlock
- # the sevrer
+ # the server
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
@@ -157,7 +157,7 @@ class LockServerNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
def setUp(self):
super(LockServerNoLegacyPolicyTest, self).setUp()
# Check that system admin or and server owner is able to lock/unlock
- # the sevrer
+ # the server
self.admin_or_owner_authorized_contexts = [
self.system_admin_context,
self.project_admin_context, self.project_member_context]
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index e279206612..fa2c37d0b6 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -12,13 +12,13 @@
import fixtures
import mock
-from nova.policies import pause_server as ps_policies
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import pause_server
from nova.compute import vm_states
from nova import exception
+from nova.policies import pause_server as ps_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
@@ -48,7 +48,7 @@ class PauseServerPolicyTest(base.BasePolicyTest):
self.mock_get.return_value = self.instance
# Check that admin or and server owner is able to pause/unpause
- # the sevrer
+ # the server
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
@@ -95,7 +95,7 @@ class PauseServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.pause')
- def test_pause_sevrer_overridden_policy_pass_with_same_user(
+ def test_pause_server_overridden_policy_pass_with_same_user(
self, mock_pause):
rule_name = ps_policies.POLICY_ROOT % 'pause'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -129,7 +129,7 @@ class PauseServerNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
def setUp(self):
super(PauseServerNoLegacyPolicyTest, self).setUp()
# Check that system admin or server owner is able to pause/unpause
- # the sevrer
+ # the server
self.admin_or_owner_authorized_contexts = [
self.system_admin_context,
self.project_admin_context, self.project_member_context]
diff --git a/nova/tests/unit/policies/test_quota_class_sets.py b/nova/tests/unit/policies/test_quota_class_sets.py
new file mode 100644
index 0000000000..4f9b228bc8
--- /dev/null
+++ b/nova/tests/unit/policies/test_quota_class_sets.py
@@ -0,0 +1,127 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.api.openstack.compute import quota_classes
+from nova.policies import quota_class_sets as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class QuotaClassSetsPolicyTest(base.BasePolicyTest):
+ """Test Quota Class Set APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(QuotaClassSetsPolicyTest, self).setUp()
+ self.controller = quota_classes.QuotaClassSetsController()
+ self.req = fakes.HTTPRequest.blank('')
+
+ # Check that admin is able to update quota class
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to update quota class
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that system reader is able to get quota class
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get quota class
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+
+ @mock.patch('nova.objects.Quotas.update_class')
+ def test_update_quota_class_sets_policy(self, mock_update):
+ rule_name = policies.POLICY_ROOT % 'update'
+ body = {'quota_class_set':
+ {'metadata_items': 128,
+ 'ram': 51200, 'floating_ips': -1,
+ 'fixed_ips': -1, 'instances': 10,
+ 'injected_files': 5, 'cores': 20}}
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, 'test_class',
+ body=body)
+
+ @mock.patch('nova.quota.QUOTAS.get_class_quotas')
+ def test_show_quota_class_sets_policy(self, mock_get):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'test_class')
+
+
+class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
+ """Test Quota Class Sets APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to
+ True so that we can switch on scope checking on the oslo.policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(QuotaClassSetsScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+ # Check that system admin is able to update and get quota class
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that non-system-admin is not able to update or get quota class
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that system reader is able to get quota class
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get quota class
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
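
Outside of the test suite, self.flags(enforce_scope=True, group="oslo_policy") corresponds to the following nova.conf setting, which makes oslo.policy reject tokens whose scope does not match a rule's scope_types:

    [oslo_policy]
    enforce_scope = True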
+
+
+class QuotaClassSetsNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
+ """Test Quota Class Sets APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(QuotaClassSetsNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_quota_sets.py b/nova/tests/unit/policies/test_quota_sets.py
new file mode 100644
index 0000000000..0b8d15c384
--- /dev/null
+++ b/nova/tests/unit/policies/test_quota_sets.py
@@ -0,0 +1,210 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.api.openstack.compute import quota_sets
+from nova.policies import quota_sets as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class QuotaSetsPolicyTest(base.BasePolicyTest):
+ """Test Quota Sets APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(QuotaSetsPolicyTest, self).setUp()
+ self.controller = quota_sets.QuotaSetsController()
+ self.controller._validate_quota_limit = mock.MagicMock()
+ self.req = fakes.HTTPRequest.blank('')
+ self.project_id = self.req.environ['nova.context'].project_id
+
+ # Check that admin is able to update or revert quota
+ # to default.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to update or revert
+ # quota to default.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that system reader is able to get another project's quota.
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get another
+ # project's quota.
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that everyone is able to get the default quota or
+ # their own quota.
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ self.everyone_unauthorized_contexts = []
+ # Check that system reader or owner is able to get their own quota.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ @mock.patch('nova.quota.QUOTAS.get_project_quotas')
+ @mock.patch('nova.quota.QUOTAS.get_settable_quotas')
+ def test_update_quota_sets_policy(self, mock_update, mock_get):
+ rule_name = policies.POLICY_ROOT % 'update'
+ body = {'quota_set': {
+ 'instances': 50,
+ 'cores': 50}
+ }
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.project_id,
+ body=body)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project')
+ def test_delete_quota_sets_policy(self, mock_delete):
+ rule_name = policies.POLICY_ROOT % 'delete'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.project_id)
+
+ @mock.patch('nova.quota.QUOTAS.get_defaults')
+ def test_default_quota_sets_policy(self, mock_default):
+ rule_name = policies.POLICY_ROOT % 'defaults'
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.defaults,
+ self.req, self.project_id)
+
+ @mock.patch('nova.quota.QUOTAS.get_project_quotas')
+ def test_detail_quota_sets_policy(self, mock_get):
+ rule_name = policies.POLICY_ROOT % 'detail'
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.detail,
+ self.req, 'try-other-project')
+ # Check if everyone (owner) is able to get their own quota
+ for cxtx in self.system_reader_or_owner_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.detail(req, cxtx.project_id)
+
+ @mock.patch('nova.quota.QUOTAS.get_project_quotas')
+ def test_show_quota_sets_policy(self, mock_get):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'try-other-project')
+ # Check if everyone (owner) is able to get their own quota
+ for cxtx in self.system_reader_or_owner_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.show(req, cxtx.project_id)
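
The owner loops above bypass common_policy_check because the owner case depends on the project_id in the URL matching the caller's project. A minimal standalone illustration of that mechanism with oslo.policy, using an assumed check string (not necessarily nova's exact default):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault(
        'os_compute_api:os-quota-sets:show',
        'project_id:%(project_id)s'))

    creds = {'project_id': 'own-project'}
    # The target dict carries the project_id taken from the URL.
    assert enforcer.authorize(
        'os_compute_api:os-quota-sets:show',
        {'project_id': 'own-project'}, creds)
    assert not enforcer.authorize(
        'os_compute_api:os-quota-sets:show',
        {'project_id': 'try-other-project'}, creds)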
+
+
+class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
+ """Test Quota Sets APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to True
+ so that scope checking is enabled on the oslo.policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and which are not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(QuotaSetsScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that system admin is able to update or revert quota
+ # to default.
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that non-system admin is not able to update or revert
+ # quota to default.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.project_admin_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+
+class QuotaSetsNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
+ """Test Quota Sets APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(QuotaSetsNoLegacyPolicyTest, self).setUp()
+
+ # Check that system reader is able to get another project's quota.
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get another
+ # project's quota.
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that everyone is able to get their own quota.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_member_context,
+ self.project_reader_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
diff --git a/nova/tests/unit/policies/test_server_external_events.py b/nova/tests/unit/policies/test_server_external_events.py
index 932d521dd5..9dab7f5bd8 100644
--- a/nova/tests/unit/policies/test_server_external_events.py
+++ b/nova/tests/unit/policies/test_server_external_events.py
@@ -91,3 +91,11 @@ class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
self.project_reader_context, self.project_foo_context,
self.other_project_member_context
]
+
+
+class ServerExternalEventsNoLegacyPolicyTest(
+ ServerExternalEventsScopeTypePolicyTest):
+ """Test Server External Events APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index 05e0084cdd..73d8c0f12e 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -10,10 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_groups
+from nova import objects
from nova.policies import server_groups as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -32,52 +34,120 @@ class ServerGroupPolicyTest(base.BasePolicyTest):
self.controller = server_groups.ServerGroupController()
self.req = fakes.HTTPRequest.blank('')
- # Policy is admin_or_owner but we do not pass the project id
- # in policy enforcement to check the ownership. project id
- # is nothing but of server group for which request is made. So
- # let's keep it as it is and with new defaults and sceop enbled,
- # these can be authorized to meanigful roles.
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.objects.InstanceGroup.get_by_uuid')).mock
+ self.sg = [objects.InstanceGroup(
+ uuid=uuids.fake_id, name='fake',
+ project_id=self.project_id, user_id='u1',
+ policies=[], members=[]),
+ objects.InstanceGroup(
+ uuid=uuids.fake_id, name='fake2', project_id='proj2',
+ user_id='u2', policies=[], members=[])]
+ self.mock_get.return_value = self.sg[0]
+
+ # Check that admin or owner is able to delete
+ # the server group.
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # Check that non-admin/owner is not able to delete
+ # the server group.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that system reader or owner is able to get
+ # the server group. Due to the old defaults everyone
+ # is allowed to perform this operation.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.system_member_context,
+ self.system_reader_context, self.project_foo_context
+ ]
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that everyone is able to list
+ # their own server group. Due to the old defaults everyone
+ # is able to list their server groups.
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.other_project_member_context]
- self.admin_or_owner_unauthorized_contexts = [
+ self.everyone_unauthorized_contexts = [
]
+ # Check that project member is able to create server group.
+ # Due to the old defaults everyone is able to create server groups.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context, self.project_reader_context,
+ self.project_foo_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context]
+ self.project_member_unauthorized_contexts = []
- @mock.patch('nova.objects.InstanceGroupList.get_all')
+ @mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def test_index_server_groups_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
+ self.common_policy_check(self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
- @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
- def test_show_server_groups_policy(self, mock_get):
+ @mock.patch('nova.objects.InstanceGroupList.get_all')
+ def test_index_all_project_server_groups_policy(self, mock_get_all):
+ mock_get_all.return_value = objects.InstanceGroupList(objects=self.sg)
+ # 'index' policy is checked before 'index:all_projects' so
+ # we have to allow it for everyone otherwise it will fail for
+ # unauthorized contexts here.
+ rule = policies.POLICY_ROOT % 'index'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ admin_req = fakes.HTTPRequest.blank(
+ '/os-server-groups?all_projects=True',
+ version='2.13', use_admin_context=True)
+ # Check admin user get all projects server groups.
+ resp = self.controller.index(admin_req)
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertEqual(2, len(projs))
+ self.assertIn('proj2', projs)
+ # Check non-admin user does not get all projects server groups.
+ req = fakes.HTTPRequest.blank('/os-server-groups?all_projects=True',
+ version='2.13')
+ resp = self.controller.index(req)
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertNotIn('proj2', projs)
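
The hard-check-then-soft-check ordering described in the comment above presumably looks something like this on the controller side (a sketch with illustrative names; context.can(..., fatal=False) is nova's soft-check form):

    # Hard check: raises PolicyNotAuthorized for everyone else.
    context.can(policies.POLICY_ROOT % 'index')
    if 'all_projects' in req.GET and context.can(
            policies.POLICY_ROOT % 'index:all_projects', fatal=False):
        # Soft check passed: return every project's groups.
        groups = objects.InstanceGroupList.get_all(context)
    else:
        # Soft check failed: silently fall back to the caller's own.
        groups = objects.InstanceGroupList.get_by_project_id(
            context, context.project_id)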
+
+ def test_show_server_groups_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_check(
+ self.system_reader_or_owner_authorized_contexts,
+ self.system_reader_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_create_server_groups_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'create'
body = {'server_group': {'name': 'fake',
'policies': ['affinity']}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
+ self.common_policy_check(self.project_member_authorized_contexts,
+ self.project_member_unauthorized_contexts,
rule_name,
self.controller.create,
self.req, body=body)
- @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
- def test_delete_server_groups_policy(self, mock_get):
+ @mock.patch('nova.objects.InstanceGroup.destroy')
+ def test_delete_server_groups_policy(self, mock_destroy):
rule_name = policies.POLICY_ROOT % 'delete'
self.common_policy_check(self.admin_or_owner_authorized_contexts,
self.admin_or_owner_unauthorized_contexts,
@@ -99,3 +169,82 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
def setUp(self):
super(ServerGroupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that project-scoped users are able to create the server group.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that non-project-scoped users are not able to create the server group.
+ self.project_member_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context
+ ]
+
+ # TODO(gmann): Test this with system scope once we remove
+ # the hardcoded admin check
+ def test_index_all_project_server_groups_policy(self):
+ pass
+
+
+class ServerGroupNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
+ """Test Server Group APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerGroupNoLegacyPolicyTest, self).setUp()
+
+ # Check that system admin or owner is able to delete
+ # the server group.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ]
+ # Check that non-system admin/owner is not able to delete
+ # the server group.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
+ # Check that system reader or owner is able to get
+ # the server group.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.system_member_context,
+ self.system_reader_context
+ ]
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.other_project_member_context, self.project_foo_context
+ ]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.project_foo_context,
+ self.system_foo_context
+ ]
+ # Check if project member can create the server group.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.other_project_member_context
+ ]
+ # Check if non-project member cannot create the server group.
+ self.project_member_unauthorized_contexts = [
+ self.system_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_reader_context,
+ self.project_foo_context,
+ ]
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
new file mode 100644
index 0000000000..49694eadeb
--- /dev/null
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -0,0 +1,161 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_topology
+from nova.compute import vm_states
+from nova import objects
+from nova.policies import server_topology as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServerTopologyPolicyTest(base.BasePolicyTest):
+ """Test Server Topology APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServerTopologyPolicyTest, self).setUp()
+ self.controller = server_topology.ServerTopologyController()
+ self.req = fakes.HTTPRequest.blank('', version='2.78')
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+ self.instance = fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, project_id=self.project_id,
+ vm_state=vm_states.ACTIVE)
+ self.mock_get.return_value = self.instance
+ self.instance.numa_topology = objects.InstanceNUMATopology(
+ instance_uuid=self.instance.uuid,
+ cells=[objects.InstanceNUMACell(
+ node=0, memory=1024, pagesize=4, id=123,
+ cpu_topology=None,
+ cpu_pinning={},
+ cpuset=set([0, 1]))])
+
+ # Check that system reader or server owner is able to get
+ # the server topology.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context]
+ # Check that non-system reader/owner is not able to get
+ # the server topology.
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context,
+ ]
+ # Check that system reader is able to get the server topology
+ # host information.
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to get the server topology
+ # host information.
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context,
+ self.other_project_reader_context
+ ]
+
+ def test_index_server_topology_policy(self):
+ rule_name = policies.BASE_POLICY_NAME % 'index'
+ self.common_policy_check(
+ self.system_reader_or_owner_authorized_contexts,
+ self.system_reader_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
+
+ def test_index_host_server_topology_policy(self):
+ rule_name = policies.BASE_POLICY_NAME % 'host:index'
+ # 'index' policy is checked before 'host:index' so
+ # we have to allow it for everyone otherwise it will
+ # fail first for unauthorized contexts.
+ rule = policies.BASE_POLICY_NAME % 'index'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name, self.controller.index, self.req, self.instance.uuid,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertEqual(123, resp['nodes'][0]['host_node'])
+ self.assertEqual({}, resp['nodes'][0]['cpu_pinning'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_node', resp['nodes'][0])
+ self.assertNotIn('cpu_pinning', resp['nodes'][0])
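
With fatal=False, common_policy_check returns the responses from both context lists instead of raising, so the test can assert on field-level filtering: the 'host:index' rule gates extra fields rather than the whole response. The controller presumably does something along these lines (illustrative sketch):

    # The soft check only widens the response; it never rejects it.
    show_host_info = context.can(
        policies.BASE_POLICY_NAME % 'host:index', fatal=False)
    node = {'vcpu_set': cell.cpuset, 'memory_mb': cell.memory}
    if show_host_info:
        node['host_node'] = cell.id
        node['cpu_pinning'] = cell.cpu_pinning or {}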
+
+
+class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
+ """Test Server Topology APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to True
+ so that scope checking is enabled on the oslo.policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and which are not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServerTopologyScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that system reader is able to get the server topology
+ # host information.
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system/reader is not able to get the server topology
+ # host information.
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context,
+ self.other_project_reader_context,
+ ]
+
+
+class ServerTopologyNoLegacyPolicyTest(ServerTopologyScopeTypePolicyTest):
+ """Test Server Topology APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTopologyNoLegacyPolicyTest, self).setUp()
+ # Check that system reader/owner is able to get
+ # the server topology.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_reader_context]
+ # Check that non-system/reader/owner is not able to get
+ # the server topology.
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.other_project_member_context, self.project_foo_context,
+ self.other_project_reader_context,
+ ]
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
new file mode 100644
index 0000000000..6d62245e39
--- /dev/null
+++ b/nova/tests/unit/policies/test_servers.py
@@ -0,0 +1,1468 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import migrate_server
+from nova.api.openstack.compute import servers
+from nova.compute import api as compute
+from nova.compute import vm_states
+from nova import exception
+from nova.network import neutron
+from nova import objects
+from nova.objects import fields
+from nova.objects.instance_group import InstanceGroup
+from nova.policies import extended_server_attributes as ea_policies
+from nova.policies import servers as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_flavor
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServersPolicyTest(base.BasePolicyTest):
+ """Test Servers APIs policies with all possible context.
+ This class defines the set of context with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those set of context, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServersPolicyTest, self).setUp()
+ self.controller = servers.ServersController()
+ self.m_controller = migrate_server.MigrateServerController()
+ self.rule_trusted_certs = policies.SERVERS % 'create:trusted_certs'
+ self.rule_attach_network = policies.SERVERS % 'create:attach_network'
+ self.rule_attach_volume = policies.SERVERS % 'create:attach_volume'
+ self.rule_requested_destination = policies.REQUESTED_DESTINATION
+ self.rule_forced_host = policies.SERVERS % 'create:forced_host'
+
+ self.req = fakes.HTTPRequest.blank('')
+ user_id = self.req.environ['nova.context'].user_id
+
+ self.controller._view_builder._add_security_grps = mock.MagicMock()
+ self.controller._view_builder._get_metadata = mock.MagicMock()
+ self.controller._view_builder._get_addresses = mock.MagicMock()
+ self.controller._view_builder._get_host_id = mock.MagicMock()
+ self.controller._view_builder._get_fault = mock.MagicMock()
+
+ self.instance = fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, project_id=self.project_id,
+ user_id=user_id, vm_state=vm_states.ACTIVE,
+ system_metadata={}, expected_attrs=['system_metadata'])
+
+ self.mock_flavor = self.useFixture(
+ fixtures.MockPatch('nova.compute.flavors.get_flavor_by_flavor_id'
+ )).mock
+ self.mock_flavor.return_value = fake_flavor.fake_flavor_obj(
+ self.req.environ['nova.context'], flavorid='1')
+
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+ self.mock_get.return_value = self.instance
+
+ self.mock_get_instance = self.useFixture(fixtures.MockPatchObject(
+ self.controller, '_get_instance')).mock
+ self.mock_get_instance.return_value = self.instance
+
+ self.servers = [fakes.stub_instance_obj(
+ 1, vm_state=vm_states.ACTIVE, uuid=uuids.fake,
+ project_id=self.project_id, user_id='user1'),
+ fakes.stub_instance_obj(
+ 2, vm_state=vm_states.ACTIVE, uuid=uuids.fake,
+ project_id='proj2', user_id='user2')]
+ fakes.stub_out_secgroup_api(
+ self, security_groups=[{'name': 'default'}])
+ self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
+ self.controller.compute_api, 'get_all')).mock
+ self.body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': uuids.fake_id,
+ 'flavorRef': uuids.fake_id,
+ },
+ }
+ self.extended_attr = ['OS-EXT-SRV-ATTR:host',
+ 'OS-EXT-SRV-ATTR:hypervisor_hostname',
+ 'OS-EXT-SRV-ATTR:instance_name',
+ 'OS-EXT-SRV-ATTR:hostname',
+ 'OS-EXT-SRV-ATTR:kernel_id',
+ 'OS-EXT-SRV-ATTR:launch_index',
+ 'OS-EXT-SRV-ATTR:ramdisk_id',
+ 'OS-EXT-SRV-ATTR:reservation_id',
+ 'OS-EXT-SRV-ATTR:root_device_name',
+ 'OS-EXT-SRV-ATTR:user_data'
+ ]
+
+ # Check that admin or owner is able to update, delete
+ # or perform server action.
+ self.admin_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # Check that non-admin/owner is not able to update, delete
+ # or perform server action.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ # Check that system reader or owner is able to get
+ # the server.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.system_member_context,
+ self.system_reader_context, self.project_foo_context
+ ]
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ # Check that everyone is able to list their own server.
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context]
+ self.everyone_unauthorized_contexts = [
+ ]
+ # Check that admin is able to create server with host request
+ # and get server extended attributes or host status.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to create server with host request
+ # and get server extended attributes or host status.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that system reader is able to list the servers
+ # for all projects.
+ self.system_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to list the servers
+ # for all projects.
+ self.system_reader_unauthorized_contexts = [
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that project member is able to create server.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context, self.system_foo_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context]
+ # Check that non-project member is not able to create server
+ self.project_member_unauthorized_contexts = [
+ ]
+ # Check that project admin is able to create server with requested
+ # destination.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-project admin is not able to create server with
+ # requested destination
+ self.project_admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that no one is able to resize cross cell.
+ self.cross_cell_authorized_contexts = []
+ self.cross_cell_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context]
+ # Check that admin is able to access the zero disk flavor
+ # and external network policies.
+ self.zero_disk_external_net_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to access the zero disk flavor
+ # and external network policies.
+ self.zero_disk_external_net_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that admin is able to get server extended attributes
+ # or host status.
+ self.server_attr_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to get server extended attributes
+ # or host status.
+ self.server_attr_admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ def test_index_server_policy(self):
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if 'project_id' in search_opts or 'user_id' in search_opts:
+ return objects.InstanceList(objects=self.servers)
+ else:
+ raise
+
+ self.mock_get_all.side_effect = fake_get_all
+
+ rule_name = policies.SERVERS % 'index'
+ self.common_policy_check(
+ self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
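
fake_get_all does double duty here: it stubs compute_api.get_all and, because a mock side_effect runs at call time, it also asserts that listings for non-admin callers stay scoped by project_id or user_id. A standalone illustration of the pattern (names are illustrative; the file's `import mock` and the stdlib's unittest.mock behave the same way here):

    from unittest import mock

    def fake_get_all(context, search_opts=None, **kwargs):
        # The assertion executes inside the mocked boundary.
        assert search_opts and 'project_id' in search_opts
        return ['own-server']

    compute_api = mock.Mock()
    compute_api.get_all.side_effect = fake_get_all
    assert compute_api.get_all(
        'ctxt', search_opts={'project_id': 'p1'}) == ['own-server']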
+
+ def test_index_all_project_server_policy(self):
+ # 'index' policy is checked before 'index:get_all_tenants' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'index'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ rule_name = policies.SERVERS % 'index:get_all_tenants'
+ req = fakes.HTTPRequest.blank('/servers?all_tenants')
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ self.assertNotIn('project_id', search_opts)
+ return objects.InstanceList(objects=self.servers)
+
+ self.mock_get_all.side_effect = fake_get_all
+
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ req)
+
+ @mock.patch('nova.compute.api.API.get_all')
+ def test_detail_list_server_policy(self, mock_get):
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if 'project_id' in search_opts or 'user_id' in search_opts:
+ return objects.InstanceList(objects=self.servers)
+ else:
+ raise
+
+ self.mock_get_all.side_effect = fake_get_all
+
+ rule_name = policies.SERVERS % 'detail'
+ self.common_policy_check(
+ self.everyone_authorized_contexts,
+ self.everyone_unauthorized_contexts,
+ rule_name,
+ self.controller.detail,
+ self.req)
+
+ def test_detail_list_all_project_server_policy(self):
+ # 'detail' policy is checked before 'detail:get_all_tenants' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'detail'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ rule_name = policies.SERVERS % 'detail:get_all_tenants'
+ req = fakes.HTTPRequest.blank('/servers?all_tenants')
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ self.assertNotIn('project_id', search_opts)
+ return objects.InstanceList(objects=self.servers)
+
+ self.mock_get_all.side_effect = fake_get_all
+
+ self.common_policy_check(self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.detail,
+ req)
+
+ def test_index_server_allow_all_filters_policy(self):
+ # 'index' policy is checked before 'allow_all_filters' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'index'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if context in self.system_reader_unauthorized_contexts:
+ self.assertNotIn('host', search_opts)
+ if context in self.system_reader_authorized_contexts:
+ self.assertIn('host', search_opts)
+ return objects.InstanceList(objects=self.servers)
+
+ self.mock_get_all.side_effect = fake_get_all
+
+ req = fakes.HTTPRequest.blank('/servers?host=1')
+ rule_name = policies.SERVERS % 'allow_all_filters'
+ self.common_policy_check(
+ self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ req, fatal=False)
+
+ def test_detail_server_allow_all_filters_policy(self):
+ # 'detail' policy is checked before 'allow_all_filters' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'detail'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if context in self.system_reader_unauthorized_contexts:
+ self.assertNotIn('host', search_opts)
+ if context in self.system_reader_authorized_contexts:
+ self.assertIn('host', search_opts)
+ return objects.InstanceList(objects=self.servers)
+ self.mock_get_all.side_effect = fake_get_all
+
+ req = fakes.HTTPRequest.blank('/servers?host=1')
+ rule_name = policies.SERVERS % 'allow_all_filters'
+ self.common_policy_check(
+ self.system_reader_authorized_contexts,
+ self.system_reader_unauthorized_contexts,
+ rule_name,
+ self.controller.detail,
+ req, fatal=False)
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ def test_show_server_policy(self, mock_bdm):
+ rule_name = policies.SERVERS % 'show'
+ self.common_policy_check(
+ self.system_reader_or_owner_authorized_contexts,
+ self.system_reader_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid)
+
+ @mock.patch('nova.compute.api.API.create')
+ def test_create_server_policy(self, mock_create):
+ mock_create.return_value = ([self.instance], '')
+ rule_name = policies.SERVERS % 'create'
+ self.common_policy_check(self.project_member_authorized_contexts,
+ self.project_member_unauthorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=self.body)
+
+ @mock.patch('nova.compute.api.API.create')
+ @mock.patch('nova.compute.api.API.parse_availability_zone')
+ def test_create_forced_host_server_policy(self, mock_az, mock_create):
+ # 'create' policy is checked before 'create:forced_host' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ mock_create.return_value = ([self.instance], '')
+ mock_az.return_value = ('test', 'host', None)
+ self.common_policy_check(self.project_admin_authorized_contexts,
+ self.project_admin_unauthorized_contexts,
+ self.rule_forced_host,
+ self.controller.create,
+ self.req, body=self.body)
+
+ @mock.patch('nova.compute.api.API.create')
+ def test_create_attach_volume_server_policy(self, mock_create):
+ # 'create' policy is checked before 'create:attach_volume' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ mock_create.return_value = ([self.instance], '')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': uuids.fake_id,
+ 'flavorRef': uuids.fake_id,
+ 'block_device_mapping': [{'device_name': 'foo'}],
+ },
+ }
+ self.common_policy_check(self.project_member_authorized_contexts,
+ self.project_member_unauthorized_contexts,
+ self.rule_attach_volume,
+ self.controller.create,
+ self.req, body=body)
+
+ @mock.patch('nova.compute.api.API.create')
+ def test_create_attach_network_server_policy(self, mock_create):
+ # 'create' policy is checked before 'create:attach_network' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ mock_create.return_value = ([self.instance], '')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': uuids.fake_id,
+ 'flavorRef': uuids.fake_id,
+ 'networks': [{
+ 'uuid': uuids.fake_id
+ }],
+ },
+ }
+ self.common_policy_check(self.project_member_authorized_contexts,
+ self.project_member_unauthorized_contexts,
+ self.rule_attach_network,
+ self.controller.create,
+ self.req, body=body)
+
+ @mock.patch('nova.compute.api.API.create')
+ def test_create_trusted_certs_server_policy(self, mock_create):
+ # 'create' policy is checked before 'create:trusted_certs' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.63')
+ mock_create.return_value = ([self.instance], '')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': uuids.fake_id,
+ 'flavorRef': uuids.fake_id,
+ 'trusted_image_certificates': [uuids.fake_id],
+ 'networks': [{
+ 'uuid': uuids.fake_id
+ }],
+
+ },
+ }
+ self.common_policy_check(self.project_member_authorized_contexts,
+ self.project_member_unauthorized_contexts,
+ self.rule_trusted_certs,
+ self.controller.create,
+ req, body=body)
+
+ @mock.patch('nova.compute.api.API.delete')
+ def test_delete_server_policy(self, mock_delete):
+ rule_name = policies.SERVERS % 'delete'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid)
+
+ def test_delete_server_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'delete'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.delete,
+ req, self.instance.uuid)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
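
This test, and the similar *_failed_with_other_user tests below, exercise an operator override rather than a default: re-keying a server rule on user_id so the action is limited to the user who created the instance. The check works like the project_id example shown with the quota tests, except that nova builds the policy target from the instance record, presumably along these lines:

    # Assumed operator override in policy.yaml (illustrative):
    #   "os_compute_api:servers:delete": "user_id:%(user_id)s"
    target = {'user_id': instance.user_id,
              'project_id': instance.project_id}
    context.can(policies.SERVERS % 'delete', target)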
+
+ @mock.patch('nova.compute.api.API.delete')
+ def test_delete_server_overridden_policy_pass_with_same_user(
+ self, mock_delete):
+ rule_name = policies.SERVERS % 'delete'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ self.controller.delete(self.req,
+ self.instance.uuid)
+
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_update_server_policy(self, mock_update):
+ rule_name = policies.SERVERS % 'update'
+ body = {'server': {'name': 'test'}}
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, body=body)
+
+ def test_update_server_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'update'
+ body = {'server': {'name': 'test'}}
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.update,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_update_server_overridden_policy_pass_with_same_user(
+ self, mock_update):
+ rule_name = policies.SERVERS % 'update'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ body = {'server': {'name': 'test'}}
+ self.controller.update(self.req,
+ self.instance.uuid, body=body)
+
+ @mock.patch('nova.compute.api.API.confirm_resize')
+ def test_confirm_resize_server_policy(self, mock_confirm_resize):
+ rule_name = policies.SERVERS % 'confirm_resize'
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_confirm_resize,
+ self.req, self.instance.uuid,
+ body={'confirmResize': 'null'})
+
+ @mock.patch('nova.compute.api.API.revert_resize')
+ def test_revert_resize_server_policy(self, mock_revert_resize):
+ rule_name = policies.SERVERS % 'revert_resize'
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_revert_resize,
+ self.req, self.instance.uuid,
+ body={'revertResize': 'null'})
+
+ @mock.patch('nova.compute.api.API.reboot')
+ def test_reboot_server_policy(self, mock_reboot):
+ rule_name = policies.SERVERS % 'reboot'
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_reboot,
+ self.req, self.instance.uuid,
+ body={'reboot': {'type': 'soft'}})
+
+ @mock.patch('nova.api.openstack.common.'
+ 'instance_has_port_with_resource_request')
+ @mock.patch('nova.compute.api.API.resize')
+ def test_resize_server_policy(self, mock_resize, mock_port):
+ rule_name = policies.SERVERS % 'resize'
+ mock_port.return_value = False
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_resize,
+ self.req, self.instance.uuid,
+ body={'resize': {'flavorRef': 'f1'}})
+
+ def test_resize_server_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'resize'
+ body = {'resize': {'flavorRef': 'f1'}}
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller._action_resize,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.api.openstack.common.'
+ 'instance_has_port_with_resource_request')
+ @mock.patch('nova.compute.api.API.resize')
+ def test_resize_server_overridden_policy_pass_with_same_user(
+ self, mock_resize, mock_port):
+ rule_name = policies.SERVERS % 'resize'
+ mock_port.return_value = False
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ body = {'resize': {'flavorRef': 'f1'}}
+ self.controller._action_resize(self.req,
+ self.instance.uuid, body=body)
+
+ @mock.patch('nova.compute.api.API.start')
+ def test_start_server_policy(self, mock_start):
+ rule_name = policies.SERVERS % 'start'
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._start_server,
+ self.req, self.instance.uuid,
+ body={'os-start': 'null'})
+
+ @mock.patch('nova.compute.api.API.stop')
+ def test_stop_server_policy(self, mock_stop):
+ rule_name = policies.SERVERS % 'stop'
+
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._stop_server,
+ self.req, self.instance.uuid,
+ body={'os-stop': 'null'})
+
+ def test_stop_server_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'stop'
+ body = {'os-stop': 'null'}
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller._stop_server,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.stop')
+ def test_stop_server_overridden_policy_pass_with_same_user(
+ self, mock_stop):
+ rule_name = policies.SERVERS % 'stop'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ body = {'os-stop': 'null'}
+ self.controller._stop_server(self.req,
+ self.instance.uuid, body=body)
+
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_rebuild_server_policy(self, mock_rebuild):
+ rule_name = policies.SERVERS % 'rebuild'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_rebuild,
+ self.req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}})
+
+ def test_rebuild_server_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'rebuild'
+ body = {'rebuild': {"imageRef": uuids.fake_id}}
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller._action_rebuild,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_rebuild_server_overridden_policy_pass_with_same_user(
+ self, mock_rebuild):
+ rule_name = policies.SERVERS % 'rebuild'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ body = {'rebuild': {"imageRef": uuids.fake_id}}
+ self.controller._action_rebuild(self.req,
+ self.instance.uuid, body=body)
+
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_rebuild_trusted_certs_server_policy(self, mock_rebuild):
+ # 'rebuild' policy is checked before 'rebuild:trusted_certs' so
+ # we have to allow it for everyone otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'rebuild'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.63')
+ rule_name = policies.SERVERS % 'rebuild:trusted_certs'
+ body = {
+ 'rebuild': {
+ 'imageRef': uuids.fake_id,
+ 'trusted_image_certificates': [uuids.fake_id],
+ },
+ }
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_rebuild,
+ req, self.instance.uuid, body=body)
+
+ def test_rebuild_trusted_certs_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('', version='2.63')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule = policies.SERVERS % 'rebuild'
+ rule_name = policies.SERVERS % 'rebuild:trusted_certs'
+ body = {
+ 'rebuild': {
+ 'imageRef': uuids.fake_id,
+ 'trusted_image_certificates': [uuids.fake_id],
+ },
+ }
+ self.policy.set_rules(
+ {rule: "@",
+ rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller._action_rebuild,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_rebuild_trusted_certs_overridden_policy_pass_with_same_user(
+ self, mock_rebuild):
+ req = fakes.HTTPRequest.blank('', version='2.63')
+ rule = policies.SERVERS % 'rebuild'
+ rule_name = policies.SERVERS % 'rebuild:trusted_certs'
+ body = {
+ 'rebuild': {
+ 'imageRef': uuids.fake_id,
+ 'trusted_image_certificates': [uuids.fake_id],
+ },
+ }
+ self.policy.set_rules(
+ {rule: "@",
+ rule_name: "user_id:%(user_id)s"}, overwrite=False)
+ self.controller._action_rebuild(req,
+ self.instance.uuid, body=body)
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.image.glance.API.generate_image_url')
+ @mock.patch('nova.compute.api.API.snapshot_volume_backed')
+ def test_create_image_server_policy(self, mock_snapshot, mock_image,
+ mock_bdm):
+ rule_name = policies.SERVERS % 'create_image'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.image.glance.API.generate_image_url')
+ @mock.patch('nova.compute.api.API.snapshot_volume_backed')
+ def test_create_image_allow_volume_backed_server_policy(self,
+ mock_snapshot, mock_image, mock_bdm):
+ # The 'create_image' policy is checked before
+ # 'create_image:allow_volume_backed', so we have to allow it for
+ # everyone; otherwise it will fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create_image'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+
+ rule_name = policies.SERVERS % 'create_image:allow_volume_backed'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
+
+ @mock.patch('nova.compute.api.API.trigger_crash_dump')
+ def test_trigger_crash_dump_server_policy(self, mock_crash):
+ rule_name = policies.SERVERS % 'trigger_crash_dump'
+ req = fakes.HTTPRequest.blank('', version='2.17')
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller._action_trigger_crash_dump,
+ req, self.instance.uuid,
+ body={'trigger_crash_dump': None})
+
+ def test_trigger_crash_dump_policy_failed_with_other_user(self):
+ # Change the user_id in request context.
+ req = fakes.HTTPRequest.blank('', version='2.17')
+ req.environ['nova.context'].user_id = 'other-user'
+ rule_name = policies.SERVERS % 'trigger_crash_dump'
+ body = {'trigger_crash_dump': None}
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized,
+ self.controller._action_trigger_crash_dump,
+ req, self.instance.uuid, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.trigger_crash_dump')
+ def test_trigger_crash_dump_overridden_policy_pass_with_same_user(
+ self, mock_crash):
+ req = fakes.HTTPRequest.blank('', version='2.17')
+ rule_name = policies.SERVERS % 'trigger_crash_dump'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
+ overwrite=False)
+ body = {'trigger_crash_dump': None}
+ self.controller._action_trigger_crash_dump(req,
+ self.instance.uuid, body=body)
+
+ def test_server_detail_with_extended_attr_policy(self):
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ return objects.InstanceList(objects=self.servers)
+ self.mock_get_all.side_effect = fake_get_all
+
+ rule = policies.SERVERS % 'detail'
+ # The server 'detail' policy is checked before the extended attributes
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.3')
+ rule_name = ea_policies.BASE_POLICY_NAME
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.detail, req,
+ fatal=False)
+ for attr in self.extended_attr:
+ for resp in authorize_res:
+ self.assertIn(attr, resp['servers'][0])
+ for resp in unauthorize_res:
+ self.assertNotIn(attr, resp['servers'][0])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_extended_attr_policy(self, mock_get, mock_block):
+ rule = policies.SERVERS % 'show'
+ # The server 'show' policy is checked before the extended attributes
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.3')
+ rule_name = ea_policies.BASE_POLICY_NAME
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.show, req, 'fake',
+ fatal=False)
+ for attr in self.extended_attr:
+ for resp in authorize_res:
+ self.assertIn(attr, resp['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn(attr, resp['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_extended_attr_policy(self, mock_rebuild,
+ mock_get, mock_bdm):
+ rule = policies.SERVERS % 'rebuild'
+ # The server 'rebuild' policy is checked before the extended attributes
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = ea_policies.BASE_POLICY_NAME
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for attr in self.extended_attr:
+ # NOTE(gmann): user_data attribute is always present in
+ # rebuild response since 2.47.
+ if attr == 'OS-EXT-SRV-ATTR:user_data':
+ continue
+ for resp in authorize_res:
+ self.assertIn(attr, resp.obj['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn(attr, resp.obj['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_extended_attr_policy(self,
+ mock_update, mock_group, mock_bdm):
+ rule = policies.SERVERS % 'update'
+ # The server 'update' policy is checked before the extended attributes
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = ea_policies.BASE_POLICY_NAME
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for attr in self.extended_attr:
+ for resp in authorize_res:
+ self.assertIn(attr, resp['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn(attr, resp['server'])
+
+ def test_server_detail_with_host_status_policy(self):
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ return objects.InstanceList(objects=self.servers)
+ self.mock_get_all.side_effect = fake_get_all
+
+ rule = policies.SERVERS % 'detail'
+ # The server 'detail' policy is checked before the host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.16')
+ rule_name = policies.SERVERS % 'show:host_status'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['servers'][0])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['servers'][0])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_host_status_policy(self,
+ mock_status, mock_block):
+ rule = policies.SERVERS % 'show'
+ # The server 'show' policy is checked before the host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.16')
+ rule_name = policies.SERVERS % 'show:host_status'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.show, req, 'fake',
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_host_status_policy(self, mock_rebuild,
+ mock_status, mock_bdm):
+ rule = policies.SERVERS % 'rebuild'
+ # The server 'rebuild' policy is checked before the host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = policies.SERVERS % 'show:host_status'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp.obj['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp.obj['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_host_status_policy(self,
+ mock_update, mock_group, mock_bdm):
+ rule = policies.SERVERS % 'update'
+ # The server 'update' policy is checked before the host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = policies.SERVERS % 'show:host_status'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['server'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['server'])
+
+ @mock.patch('nova.compute.api.API.get_instances_host_statuses')
+ def test_server_detail_with_unknown_host_status_policy(self, mock_status):
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ return objects.InstanceList(objects=self.servers)
+ self.mock_get_all.side_effect = fake_get_all
+ host_statuses = {}
+ for server in self.servers:
+ host_statuses.update({server.uuid: fields.HostStatus.UNKNOWN})
+ mock_status.return_value = host_statuses
+ rule = policies.SERVERS % 'detail'
+ # The server 'detail' policy is checked before the unknown host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts. To verify the unknown host_status
+ # policy we need to disallow the host_status policy for everyone.
+ rule_host_status = policies.SERVERS % 'show:host_status'
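+ # NOTE: "!" is the oslo.policy rule that never passes (deny everyone).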
+ self.policy.set_rules({
+ rule: "@",
+ rule_host_status: "!"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.16')
+ rule_name = policies.SERVERS % 'show:host_status:unknown-only'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['servers'][0])
+ self.assertEqual(fields.HostStatus.UNKNOWN,
+ resp['servers'][0]['host_status'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['servers'][0])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_unknown_host_status_policy(self,
+ mock_status, mock_block):
+ mock_status.return_value = fields.HostStatus.UNKNOWN
+ rule = policies.SERVERS % 'show'
+ # The server 'show' policy is checked before the unknown host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts. To verify the unknown host_status
+ # policy we need to disallow the host_status policy for everyone.
+ rule_host_status = policies.SERVERS % 'show:host_status'
+ self.policy.set_rules({
+ rule: "@",
+ rule_host_status: "!"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.16')
+ rule_name = policies.SERVERS % 'show:host_status:unknown-only'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.show, req, 'fake',
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['server'])
+ self.assertEqual(
+ fields.HostStatus.UNKNOWN, resp['server']['host_status'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_unknown_host_status_policy(self, mock_rebuild,
+ mock_status, mock_bdm):
+ mock_status.return_value = fields.HostStatus.UNKNOWN
+ rule = policies.SERVERS % 'rebuild'
+ # The server 'rebuild' policy is checked before the unknown host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts. To verify the unknown host_status
+ # policy we need to disallow the host_status policy for everyone.
+ rule_host_status = policies.SERVERS % 'show:host_status'
+ self.policy.set_rules({
+ rule: "@",
+ rule_host_status: "!"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = policies.SERVERS % 'show:host_status:unknown-only'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp.obj['server'])
+ self.assertEqual(
+ fields.HostStatus.UNKNOWN, resp.obj['server']['host_status'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp.obj['server'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_unknown_host_status_policy(self,
+ mock_update, mock_group, mock_status, mock_bdm):
+ mock_status.return_value = fields.HostStatus.UNKNOWN
+ rule = policies.SERVERS % 'update'
+ # The server 'update' policy is checked before the unknown host_status
+ # policy, so we have to allow it for everyone; otherwise it will fail
+ # first for unauthorized contexts. To verify the unknown host_status
+ # policy we need to disallow the host_status policy for everyone.
+ rule_host_status = policies.SERVERS % 'show:host_status'
+ self.policy.set_rules({
+ rule: "@",
+ rule_host_status: "!"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.75')
+ rule_name = policies.SERVERS % 'show:host_status:unknown-only'
+ authorize_res, unauthorize_res = self.common_policy_check(
+ self.server_attr_admin_authorized_contexts,
+ self.server_attr_admin_unauthorized_contexts,
+ rule_name, self.controller.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('host_status', resp['server'])
+ self.assertEqual(
+ fields.HostStatus.UNKNOWN, resp['server']['host_status'])
+ for resp in unauthorize_res:
+ self.assertNotIn('host_status', resp['server'])
+
+ @mock.patch('nova.compute.api.API.create')
+ def test_create_requested_destination_server_policy(self,
+ mock_create):
+ # The 'create' policy is checked before 'create:requested_destination',
+ # so we have to allow it for everyone; otherwise it will
+ # fail for unauthorized contexts here.
+ rule = policies.SERVERS % 'create'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.74')
+
+ def fake_create(context, *args, **kwargs):
+ for attr in ['requested_host', 'requested_hypervisor_hostname']:
+ if context in self.project_admin_authorized_contexts:
+ self.assertIn(attr, kwargs)
+ if context in self.project_admin_unauthorized_contexts:
+ self.assertNotIn(attr, kwargs)
+ return ([self.instance], '')
+ mock_create.side_effect = fake_create
+
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': uuids.fake_id,
+ 'flavorRef': uuids.fake_id,
+ 'networks': [{
+ 'uuid': uuids.fake_id
+ }],
+ 'host': 'fake',
+ 'hypervisor_hostname': 'fake'
+ },
+ }
+
+ self.common_policy_check(self.project_admin_authorized_contexts,
+ self.project_admin_unauthorized_contexts,
+ self.rule_requested_destination,
+ self.controller.create,
+ req, body=body)
+
+ @mock.patch('nova.compute.api.API._check_requested_networks')
+ @mock.patch('nova.compute.api.API._allow_resize_to_same_host')
+ @mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.api.openstack.common.get_instance')
+ @mock.patch('nova.api.openstack.common.'
+ 'instance_has_port_with_resource_request')
+ @mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
+ def test_cross_cell_resize_server_policy(self,
+ mock_resize, mock_port, mock_get, mock_save, mock_rs,
+ mock_allow, m_net):
+ self.stub_out('nova.compute.api.API.get_instance_host_status',
+ lambda x, y: "UP")
+
+ # The 'migrate' policy is checked before 'resize:cross_cell', so
+ # we have to allow it for everyone; otherwise it will
+ # fail for unauthorized contexts here.
+ rule = 'os_compute_api:os-migrate-server:migrate'
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ rule_name = policies.CROSS_CELL_RESIZE
+ mock_port.return_value = False
+ req = fakes.HTTPRequest.blank('', version='2.56')
+
+ def fake_get(*args, **kwargs):
+ return fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, project_id=self.project_id,
+ user_id='fake-user', vm_state=vm_states.ACTIVE,
+ launched_at=timeutils.utcnow())
+
+ mock_get.side_effect = fake_get
+
+ def fake_validate(context, instance,
+ host_name, allow_cross_cell_resize):
+ if context in self.cross_cell_authorized_contexts:
+ self.assertTrue(allow_cross_cell_resize)
+ if context in self.cross_cell_unauthorized_contexts:
+ self.assertFalse(allow_cross_cell_resize)
+ return objects.ComputeNode(host=1, hypervisor_hostname=2)
+
+ self.stub_out(
+ 'nova.compute.api.API._validate_host_for_cold_migrate',
+ fake_validate)
+
+ self.common_policy_check(self.cross_cell_authorized_contexts,
+ self.cross_cell_unauthorized_contexts,
+ rule_name,
+ self.m_controller._migrate,
+ req, self.instance.uuid,
+ body={'migrate': {'host': 'fake'}},
+ fatal=False)
+
+ def test_network_attach_external_network_policy(self):
+ # NOTE(gmann): Testing policy 'network:attach_external_network',
+ # which raises a different error than PolicyNotAuthorized
+ # if not allowed.
+ neutron_api = neutron.API()
+ for context in self.zero_disk_external_net_authorized_contexts:
+ neutron_api._check_external_network_attach(context,
+ [{'id': 1, 'router:external': 'ext'}])
+ for context in self.zero_disk_external_net_unauthorized_contexts:
+ self.assertRaises(exception.ExternalNetworkAttachForbidden,
+ neutron_api._check_external_network_attach,
+ context, [{'id': 1, 'router:external': 'ext'}])
+
+ def test_zero_disk_flavor_policy(self):
+ # NOTE(gmann): Testing policy 'create:zero_disk_flavor',
+ # which raises a different error than PolicyNotAuthorized
+ # if not allowed.
+ image = {'id': uuids.image_id, 'status': 'foo'}
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=0, extra_specs={'hw:pmu': "true"})
+ compute_api = compute.API()
+ for context in self.zero_disk_external_net_authorized_contexts:
+ compute_api._validate_flavor_image_nostatus(context,
+ image, flavor, None)
+ for context in self.zero_disk_external_net_unauthorized_contexts:
+ self.assertRaises(
+ exception.BootFromVolumeRequiredForZeroDiskFlavor,
+ compute_api._validate_flavor_image_nostatus,
+ context, image, flavor, None)
+
+
+class ServersScopeTypePolicyTest(ServersPolicyTest):
+ """Test Servers APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope option to
+ True so that scope checking is enabled on the oslo.policy side.
+ It defines the sets of contexts with scoped tokens which are allowed
+ and not allowed to pass the policy checks. With those contexts, it
+ runs the API operations and verifies the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServersScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
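+ # NOTE: with enforce_scope=True, oslo.policy rejects tokens whose
+ # scope does not match a rule's scope_types instead of only logging
+ # a warning.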
+
+ # These policies are project scoped only, and the 'create' policy is
+ # checked first, so even if we allow it for everyone the system
+ # scoped contexts cannot validate these: they fail on the 'create'
+ # policy due to its scope_type. So we need to set the rule names to
+ # None to skip the policy error message assertion in the base class.
+ # These rule names are only used for error message assertion.
+ self.rule_trusted_certs = None
+ self.rule_attach_network = None
+ self.rule_attach_volume = None
+ self.rule_requested_destination = None
+ self.rule_forced_host = None
+
+ # Check that system admin is able to create server with host request
+ # and get server extended attributes or host status.
+ self.admin_authorized_contexts = [
+ self.system_admin_context
+ ]
+ # Check that non-system/admin is not able to create server with
+ # host request and get server extended attributes or host status.
+ self.admin_unauthorized_contexts = [
+ self.project_admin_context, self.legacy_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that system reader is able to list the server
+ # for all projects.
+ self.system_reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system reader is not able to list the server
+ # for all projects.
+ self.system_reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ # Check if project member can create the server.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context
+ ]
+ # Check if non-project member cannot create the server.
+ self.project_member_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context
+ ]
+
+ # Check that project admin is able to create server with requested
+ # destination.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ # Check that non-project admin is not able to create server with
+ # requested destination
+ self.project_admin_unauthorized_contexts = [
+ self.system_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+
+class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
+ """Test Servers APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system_admin_or_owner APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServersNoLegacyPolicyTest, self).setUp()
+
+ # Check that system admin or owner is able to update, delete
+ # or perform server action.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ]
+ # Check that non-system and non-admin/owner is not able to update,
+ # delete or perform server action.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_reader_context,
+ self.project_foo_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context]
+
+ # Check that system reader or project owner is able to get the
+ # server.
+ self.system_reader_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_reader_context,
+ self.project_member_context,
+ ]
+
+ # Check that non-system reader and non-admin/owner is not able to get
+ # the server.
+ self.system_reader_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_foo_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.project_foo_context,
+ self.system_foo_context
+ ]
+ # Check if project member can create the server.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
+ ]
+ # Check if non-project member cannot create the server.
+ self.project_member_unauthorized_contexts = [
+ self.system_admin_context,
+ self.system_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.system_reader_context, self.system_foo_context
+ ]
+ # Check that system admin is able to get server extended attributes
+ # or host status.
+ self.server_attr_admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that non-system admin is not able to get server extended
+ # attributes or host status.
+ self.server_attr_admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
diff --git a/nova/tests/unit/policies/test_services.py b/nova/tests/unit/policies/test_services.py
index 48dc5e6b36..8d18812bf8 100644
--- a/nova/tests/unit/policies/test_services.py
+++ b/nova/tests/unit/policies/test_services.py
@@ -160,13 +160,13 @@ class ServicesDeprecatedPolicyTest(base.BasePolicyTest):
super(ServicesDeprecatedPolicyTest, self).setUp()
self.controller = services_v21.ServiceController()
self.member_req = fakes.HTTPRequest.blank('')
- self.member_req.environ['nova.context'] = self.project_member_context
+ self.member_req.environ['nova.context'] = self.system_reader_context
self.reader_req = fakes.HTTPRequest.blank('')
self.reader_req.environ['nova.context'] = self.project_reader_context
self.deprecated_policy = "os_compute_api:os-services"
# Override the rule with different checks than the defaults so that we can
# verify the rule overridden case.
- override_rules = {self.deprecated_policy: base_policy.PROJECT_MEMBER}
+ override_rules = {self.deprecated_policy: base_policy.SYSTEM_READER}
# NOTE(gmann): Only override the deprecated rule in the policy file so
# that we can verify if overridden checks are considered by oslo.policy.
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 382505e5a7..60eecdece8 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova.api.openstack.compute import simple_tenant_usage
from nova.policies import simple_tenant_usage as policies
from nova.tests.unit.api.openstack import fakes
@@ -28,45 +30,46 @@ class SimpleTenantUsagePolicyTest(base.BasePolicyTest):
super(SimpleTenantUsagePolicyTest, self).setUp()
self.controller = simple_tenant_usage.SimpleTenantUsageController()
self.req = fakes.HTTPRequest.blank('')
+ self.controller._get_instances_all_cells = mock.MagicMock()
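+ # NOTE: stubbed out so these policy tests do not need to query the
+ # cell databases for instances.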
- # Check that admin or and owner is able to get
+ # Check that reader (legacy admin) or owner is able to get
# the tenant usage statistics for a specific tenant.
- self.admin_or_owner_authorized_contexts = [
+ self.reader_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get
+ self.project_reader_context, self.project_foo_context,
+ self.system_member_context, self.system_reader_context]
+ # Check that non-reader (legacy non-admin)/non-owner is not able to get
# the tenant usage statistics for a specific tenant.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
+ self.reader_or_owner_unauthorized_contexts = [
self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
+ self.other_project_reader_context
]
- # Check that admin is able to get the tenant usage statistics.
- self.admin_authorized_contexts = [
+ # Check that reader is able to get the tenant usage statistics.
+ self.reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to get the tenant usage statistics.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-reader is not able to get the tenant usage statistics.
+ self.reader_unauthorized_contexts = [
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
+ self.other_project_reader_context
]
def test_index_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'list'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
+ self.common_policy_check(self.reader_authorized_contexts,
+ self.reader_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
def test_show_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
+ self.common_policy_check(self.reader_or_owner_authorized_contexts,
+ self.reader_or_owner_unauthorized_contexts,
rule_name,
self.controller.show,
self.req, self.project_id)
@@ -86,15 +89,41 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
super(SimpleTenantUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to get the tenant usage statistics.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system/admin is not able to get the tenant usage
+ # Check that system reader is able to get the tenant usage statistics.
+ self.reader_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context]
+ # Check that non-system/reader is not able to get the tenant usage
# statistics.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
+ self.reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
self.project_admin_context, self.project_member_context,
self.other_project_member_context,
- self.project_foo_context, self.project_reader_context
+ self.project_foo_context, self.project_reader_context,
+ self.other_project_reader_context
+ ]
+
+
+class SimpleTenantUsageNoLegacyPolicyTest(
+ SimpleTenantUsageScopeTypePolicyTest):
+ """Test Simple Tenant Usage APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(SimpleTenantUsageNoLegacyPolicyTest, self).setUp()
+ # Check that system reader or owner is able to get
+ # the tenant usage statistics for a specific tenant.
+ self.reader_or_owner_authorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context]
+ # Check that non-system reader/owner is not able to get
+ # the tenant usage statistics for a specific tenant.
+ self.reader_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.other_project_reader_context
]
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index 07b073f197..9a65321582 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -45,7 +45,7 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
self.mock_get.return_value = self.instance
# Check that admin or and server owner is able to suspend/resume
- # the sevrer
+ # the server
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
@@ -92,7 +92,7 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.suspend')
- def test_suspend_sevrer_overridden_policy_pass_with_same_user(
+ def test_suspend_server_overridden_policy_pass_with_same_user(
self, mock_suspend):
rule_name = policies.POLICY_ROOT % 'suspend'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
diff --git a/nova/tests/unit/privsep/test_qemu.py b/nova/tests/unit/privsep/test_qemu.py
index 5fbc178983..85c48aa4ae 100644
--- a/nova/tests/unit/privsep/test_qemu.py
+++ b/nova/tests/unit/privsep/test_qemu.py
@@ -52,3 +52,27 @@ class QemuTestCase(test.NoDBTestCase):
def test_convert_image_unprivileged(self):
self._test_convert_image(nova.privsep.qemu.unprivileged_convert_image)
+
+ @mock.patch('oslo_concurrency.processutils.execute')
+ @mock.patch('os.path.isdir')
+ def _test_qemu_img_info(self, method, mock_isdir, mock_execute):
+ mock_isdir.return_value = False
+ mock_execute.return_value = (mock.sentinel.out, None)
+ expected_cmd = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ mock.sentinel.path, '--force-share', '--output=json', '-f',
+ mock.sentinel.format)
+
+ # Assert that the output from processutils is returned
+ self.assertEqual(
+ mock.sentinel.out,
+ method(mock.sentinel.path, format=mock.sentinel.format))
+ # Assert that the expected command is used
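+ # and that QEMU_IMG_LIMITS caps qemu-img's CPU time and address
+ # space as a guard against maliciously crafted images.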
+ mock_execute.assert_called_once_with(
+ *expected_cmd, prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
+
+ def test_privileged_qemu_img_info(self):
+ self._test_qemu_img_info(nova.privsep.qemu.privileged_qemu_img_info)
+
+ def test_unprivileged_qemu_img_info(self):
+ self._test_qemu_img_info(nova.privsep.qemu.unprivileged_qemu_img_info)
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index ba47285224..6f41b6343a 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -350,10 +350,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-quota-sets:update",
"os_compute_api:os-quota-sets:delete",
"os_compute_api:os-server-diagnostics",
+"os_compute_api:os-server-groups:index:all_projects",
"os_compute_api:os-services:update",
"os_compute_api:os-services:delete",
"os_compute_api:os-shelve:shelve_offload",
-"os_compute_api:os-simple-tenant-usage:list",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
@@ -388,7 +388,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:server-metadata:create",
"os_compute_api:server-metadata:update",
"os_compute_api:server-metadata:update_all",
-"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-suspend-server:suspend",
"os_compute_api:os-suspend-server:resume",
"os_compute_api:os-tenant-networks",
@@ -455,6 +454,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
self.system_reader_rules = (
"os_compute_api:servers:migrations:index",
"os_compute_api:servers:migrations:show",
+"os_compute_api:os-simple-tenant-usage:list",
"os_compute_api:os-migrations:index",
"os_compute_api:os-services:list",
"os_compute_api:os-instance-actions:events:details",
@@ -472,6 +472,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
)
self.system_reader_or_owner_rules = (
+"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-security-groups:list",
"os_compute_api:os-volumes-attachments:index",
"os_compute_api:os-volumes-attachments:show",
@@ -527,8 +528,9 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
'system_admin_api', 'system_reader_api',
- 'project_member_api', 'project_reader_api',
- 'system_admin_or_owner', 'system_or_project_reader')
+ 'project_admin_api', 'project_member_api',
+ 'project_reader_api', 'system_admin_or_owner',
+ 'system_or_project_reader')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
self.allow_all_rules + self.system_reader_rules +
diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 7d01f38c52..07f251390e 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -428,7 +428,8 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
mock.sentinel.context, mock.sentinel.migration,
mock.sentinel.instance, mock.sentinel.disk_info,
mock.sentinel.network_info, mock.sentinel.image_meta,
- mock.sentinel.resize_instance, mock.sentinel.block_device_info,
+ mock.sentinel.resize_instance, mock.sentinel.allocations,
+ mock.sentinel.block_device_info,
mock.sentinel.power_on)
self.driver._migrationops.finish_migration.assert_called_once_with(
diff --git a/nova/tests/unit/virt/libvirt/fake_imagebackend.py b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
index d73a396ab5..093fbbbcc0 100644
--- a/nova/tests/unit/virt/libvirt/fake_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
@@ -207,7 +207,8 @@ class ImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
- extra_specs, hypervisor_version, disk_unit=None):
+ extra_specs, hypervisor_version, disk_unit=None,
+ boot_order=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
@@ -218,4 +219,6 @@ class ImageBackendFixture(fixtures.Fixture):
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
+ if boot_order:
+ info.boot_order = boot_order
return info
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
index bc676eac5b..940cbef788 100644
--- a/nova/tests/unit/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -61,6 +61,7 @@ VIR_DOMAIN_XML_MIGRATABLE = 8
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8
+VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 16
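+# NOTE: 16 matches libvirt's virDomainBlockRebaseFlags value; the previous
+# fake value of 8 collided with VIR_DOMAIN_BLOCK_REBASE_COPY.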
VIR_DOMAIN_BLOCK_REBASE_COPY_DEV = 32
# virDomainBlockResize
@@ -160,8 +161,6 @@ VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
-# blockRebase flags
-VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 66fa5e544a..966e179280 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -76,22 +76,25 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.test_instance['old_flavor'] = None
self.test_instance['new_flavor'] = None
- def test_volume_in_mapping(self):
- swap = {'device_name': '/dev/sdb',
- 'swap_size': 1}
+ def _test_block_device_info(self, with_eph=True, with_swap=True,
+ with_bdms=True):
+ swap = {'device_name': '/dev/vdb', 'swap_size': 1}
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
- 'device_name': '/dev/sdc1', 'size': 10},
+ 'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
- 'device_name': '/dev/sdd', 'size': 10}]
+ 'device_name': '/dev/vdd', 'size': 10}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
- block_device_info = {
- 'root_device_name': '/dev/sda',
- 'swap': swap,
- 'ephemerals': ephemerals,
- 'block_device_mapping': block_device_mapping}
+ return {'root_device_name': '/dev/vda',
+ 'swap': swap if with_swap else {},
+ 'ephemerals': ephemerals if with_eph else [],
+ 'block_device_mapping':
+ block_device_mapping if with_bdms else []}
+
+ def test_volume_in_mapping(self):
+ block_device_info = self._test_block_device_info()
def _assert_volume_in_mapping(device_name, true_or_false):
self.assertEqual(
@@ -99,10 +102,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device.volume_in_mapping(device_name,
block_device_info))
- _assert_volume_in_mapping('sda', False)
- _assert_volume_in_mapping('sdb', True)
- _assert_volume_in_mapping('sdc1', True)
- _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('vda', False)
+ _assert_volume_in_mapping('vdb', True)
+ _assert_volume_in_mapping('vdc1', True)
+ _assert_volume_in_mapping('vdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
@@ -268,6 +271,206 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
self.assertEqual(expect, mapping)
+ def _test_get_disk_mapping_stable_rescue(
+ self, rescue_props, expected, block_device_info, with_local=False):
+ instance = objects.Instance(**self.test_instance)
+
+ # Make disk.local disks optional per test as found in
+ # nova.virt.libvirt.BlockInfo.get_default_ephemeral_info
+ instance.ephemeral_gb = '20' if with_local else None
+
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
+ rescue_image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ rescue_props = objects.ImageMetaProps.from_dict(rescue_props)
+ rescue_image_meta.properties = rescue_props
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance, "virtio", "ide",
+ image_meta, rescue=True, block_device_info=block_device_info,
+ rescue_image_meta=rescue_image_meta)
+
+ # Assert that the expected mapping is returned from get_disk_mapping
+ self.assertEqual(expected, mapping)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk(self):
+ """Assert the disk mapping when rescuing using a virtio disk"""
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_ide_disk(self):
+ """Assert the disk mapping when rescuing using an IDE disk"""
+ rescue_props = {'hw_rescue_bus': 'ide'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'disk'},
+ 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_usb_disk(self):
+ """Assert the disk mapping when rescuing using a USB disk"""
+ rescue_props = {'hw_rescue_bus': 'usb'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {'bus': 'usb', 'dev': 'sda', 'type': 'disk'},
+ 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_ide_cdrom(self):
+ """Assert the disk mapping when rescuing using an IDE cd-rom"""
+ rescue_props = {'hw_rescue_device': 'cdrom'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
+ 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk_with_local(self):
+ """Assert the disk mapping when rescuing using a virtio disk with
+ default ephemeral (local) disks also attached to the instance.
+ """
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info, with_local=True)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk_with_eph(self):
+ """Assert the disk mapping when rescuing using a virtio disk with
+ ephemeral disks also attached to the instance.
+ """
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info(
+ with_swap=False, with_bdms=False)
+ expected = {
+ 'disk': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.eph0': {
+ 'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4',
+ 'type': 'disk'},
+ 'disk.eph1': {
+ 'bus': 'ide', 'dev': 'vdd', 'type': 'disk'},
+ 'disk.rescue': {
+ 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info, with_local=True)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk_with_swap(self):
+ """Assert the disk mapping when rescuing using a virtio disk with
+ swap attached to the instance.
+ """
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_bdms=False)
+ expected = {
+ 'disk': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {
+ 'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {
+ 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk_with_bdm(self):
+ """Assert the disk mapping when rescuing using a virtio disk with
+ volumes also attached to the instance.
+ """
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info(
+ with_eph=False, with_swap=False)
+ expected = {
+ '/dev/sde': {
+ 'bus': 'scsi', 'dev': 'sde', 'type': 'disk'},
+ '/dev/sdf': {
+ 'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'},
+ 'disk': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.rescue': {
+ 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info)
+
+ def test_get_disk_mapping_stable_rescue_virtio_disk_with_everything(self):
+ """Assert the disk mapping when rescuing using a virtio disk with
+ volumes, ephemerals and swap also attached to the instance.
+ """
+ rescue_props = {'hw_rescue_bus': 'virtio'}
+ block_info = self._test_block_device_info()
+ expected = {
+ '/dev/sde': {
+ 'bus': 'scsi', 'dev': 'sde', 'type': 'disk'},
+ '/dev/sdf': {
+ 'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'},
+ 'disk': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'},
+ 'disk.eph0': {
+ 'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4',
+ 'type': 'disk'},
+ 'disk.eph1': {
+ 'bus': 'ide', 'dev': 'vdd', 'type': 'disk'},
+ 'disk.rescue': {
+ 'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {
+ 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {
+ 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk'}
+ }
+ self._test_get_disk_mapping_stable_rescue(
+ rescue_props, expected, block_info, with_local=True)
+
def test_get_disk_mapping_lxc(self):
# A simple disk mapping setup, but for lxc
@@ -1077,6 +1280,40 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
expected_order = ['hd', 'cdrom']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+ def _get_rescue_image_meta(self, props_dict):
+ meta_dict = dict(self.test_image_meta)
+ meta_dict['properties'] = props_dict
+ return objects.ImageMeta.from_dict(meta_dict)
+
+ def test_get_rescue_device(self):
+ # Assert that all supported device types are returned correctly
+ for device in blockinfo.SUPPORTED_DEVICE_TYPES:
+ meta = self._get_rescue_image_meta({'hw_rescue_device': device})
+ self.assertEqual(device, blockinfo.get_rescue_device(meta))
+
+ # Assert that disk is returned if hw_rescue_device isn't set
+ meta = self._get_rescue_image_meta({'hw_rescue_bus': 'virtio'})
+ self.assertEqual('disk', blockinfo.get_rescue_device(meta))
+
+ # Assert that UnsupportedRescueDevice is raised for unsupported devices
+ meta = self._get_rescue_image_meta({'hw_rescue_device': 'fs'})
+ self.assertRaises(exception.UnsupportedRescueDevice,
+ blockinfo.get_rescue_device, meta)
+
+ def test_get_rescue_bus(self):
+ # Assert that all supported device bus types are returned. Stable
+ # device rescue is not supported by xen or lxc so ignore these.
+ for virt_type in ['qemu', 'kvm', 'uml', 'parallels']:
+ for bus in blockinfo.SUPPORTED_DEVICE_BUSES[virt_type]:
+ meta = self._get_rescue_image_meta({'hw_rescue_bus': bus})
+ self.assertEqual(bus, blockinfo.get_rescue_bus(None, virt_type,
+ meta, None))
+
+ # Assert that UnsupportedRescueBus is raised for unsupported buses
+ meta = self._get_rescue_image_meta({'hw_rescue_bus': 'xen'})
+ self.assertRaises(exception.UnsupportedRescueBus,
+ blockinfo.get_rescue_bus, None, 'kvm', meta, 'disk')
+
class DefaultDeviceNamesTestCase(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index c23760efab..8bc24b0b9d 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -4638,6 +4638,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_support.assert_called_once_with()
self.assertEqual(cfg.os_loader_type, "pflash")
+ @mock.patch('os.path.exists', return_value=True)
+ def test_check_uefi_support_aarch64(self, mock_exist):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self._stub_host_capabilities_cpu_arch(fields.Architecture.AARCH64)
+ self.assertTrue(drvr._has_uefi_support())
+ self.assertTrue(drvr._check_uefi_support(None))
+
def test_get_guest_config_with_block_device(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5573,6 +5580,52 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = libvirt_guest.Guest(FakeVirtDomain())
return drvr._get_serial_ports_from_guest(guest, mode=mode)
+ def test_get_scsi_controller_next_unit_from_guest(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file' device='disk'>
+ <target dev='sda' bus='scsi'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <target dev='sdc' bus='scsi'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='1'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ self._test_get_scsi_controller_next_unit_from_guest(xml, 2)
+
+ def test_get_scsi_controller_next_unit_from_guest_no_scsi(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file' device='disk'>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ self._test_get_scsi_controller_next_unit_from_guest(xml, 0)
+
+ @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
+ def _test_get_scsi_controller_next_unit_from_guest(self, xml,
+ expect_num,
+ mock_get_xml_desc):
+ mock_get_xml_desc.return_value = xml
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ guest = libvirt_guest.Guest(FakeVirtDomain())
+ i = drvr._get_scsi_controller_next_unit(guest)
+ self.assertEqual(expect_num, i)
+
def test_get_guest_config_with_type_xen(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
@@ -8591,12 +8644,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(key_manager, 'API')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_is_luks_v1')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
@mock.patch('nova.virt.libvirt.host.Host')
@mock.patch('os_brick.encryptors.luks.is_luks')
def test_connect_volume_luks(self, mock_is_volume_luks, mock_host,
- mock_get_volume_encryptor, mock_is_luks_v1,
+ mock_get_volume_encryptor, mock_allow_native_luksv1,
mock_get_volume_encryption, mock_get_key_mgr):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -8623,9 +8676,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_key.get_encoded.return_value = key_encoded
# assert that the secret is created for the encrypted volume during
- # _connect_volume when _is_luks_v1 is True
+ # _connect_volume when _allow_native_luksv1 is True
mock_get_volume_encryption.return_value = encryption
- mock_is_luks_v1.return_value = True
+ mock_allow_native_luksv1.return_value = True
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption)
@@ -8636,7 +8689,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# assert that the encryptor is used if is_luks is False
drvr._host.create_secret.reset_mock()
mock_get_volume_encryption.reset_mock()
- mock_is_luks_v1.return_value = False
+ mock_allow_native_luksv1.return_value = False
drvr._connect_volume(self.context, connection_info, instance,
encryption=encryption)
@@ -8645,7 +8698,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
**encryption)
# assert that we format the volume if it is not already formatted
- mock_is_luks_v1.return_value = True
+ mock_allow_native_luksv1.return_value = True
mock_is_volume_luks.return_value = False
drvr._connect_volume(self.context, connection_info, instance,
@@ -8653,6 +8706,54 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_encryptor._format_volume.assert_called_once_with(key,
**encryption)
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
+ def test_connect_volume_native_luks_workaround(self,
+ mock_get_volume_encryptor, mock_get_volume_encryption):
+ self.flags(disable_native_luksv1=True, group='workarounds')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ connection_info = {'driver_volume_type': 'fake',
+ 'data': {'device_path': '/fake',
+ 'access_mode': 'rw',
+ 'volume_id': uuids.volume_id}}
+ encryption = {'provider': encryptors.LUKS,
+ 'encryption_key_id': uuids.encryption_key_id}
+ instance = mock.sentinel.instance
+ mock_encryptor = mock.Mock()
+ mock_get_volume_encryptor.return_value = mock_encryptor
+ mock_get_volume_encryption.return_value = encryption
+
+ drvr._connect_volume(self.context, connection_info, instance,
+ encryption=encryption)
+
+ # Assert that the os-brick encryptors are attached
+ mock_encryptor.attach_volume.assert_called_once_with(
+ self.context, **encryption)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
+ def test_disconnect_volume_native_luks_workaround(self,
+ mock_get_volume_encryptor, mock_get_volume_encryption):
+ self.flags(disable_native_luksv1=True, group='workarounds')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._host = mock.Mock()
+ drvr._host.find_secret.return_value = None
+ connection_info = {'driver_volume_type': 'fake',
+ 'data': {'device_path': '/fake',
+ 'access_mode': 'rw',
+ 'volume_id': uuids.volume_id}}
+ encryption = {'provider': encryptors.LUKS,
+ 'encryption_key_id': uuids.encryption_key_id}
+ instance = mock.sentinel.instance
+ mock_encryptor = mock.Mock()
+ mock_get_volume_encryptor.return_value = mock_encryptor
+ mock_get_volume_encryption.return_value = encryption
+
+ drvr._disconnect_volume(self.context, connection_info, instance)
+
+ mock_encryptor.detach_volume.assert_called_once_with(
+ **encryption)
+
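Both workaround tests encode the same routing decision: with [workarounds]disable_native_luksv1 set, attach and detach are delegated to the os-brick encryptor objects rather than QEMU's native LUKS layer, so no libvirt secret is ever created or looked up. A rough sketch of the attach branch, with an invented secret helper:

    def create_libvirt_secret(encryption):
        """Stand-in for Host.create_secret(); stores the LUKS passphrase."""
        print('would create secret for', encryption['encryption_key_id'])

    def attach_encryptor(context, encryptor, encryption, allow_native):
        # The workaround forces allow_native to False, taking the
        # os-brick path exercised by the tests above.
        if allow_native:
            create_libvirt_secret(encryption)
        else:
            encryptor.attach_volume(context, **encryption)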
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
def test_disconnect_volume_luks(self, mock_get_volume_encryptor):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -9409,8 +9510,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_encryption_metadata.assert_called_once_with(
self.context, drvr._volume_api, uuids.volume_id, connection_info)
- mock_qemu_img_info.assert_called_once_with(
- mock.sentinel.device_path, output_format='json')
+ mock_qemu_img_info.assert_called_once_with(mock.sentinel.device_path)
# Assert that the Libvirt call to resize the device within the instance
# is called with the LUKSv1 payload offset taken into account.
@@ -9467,8 +9567,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_encryption_metadata.assert_called_once_with(
self.context, drvr._volume_api, uuids.volume_id, connection_info)
- mock_qemu_img_info.assert_called_once_with(
- 'rbd:pool/volume', output_format='json')
+ mock_qemu_img_info.assert_called_once_with('rbd:pool/volume')
# Assert that the Libvirt call to resize the device within the instance
# is called with the LUKSv1 payload offset taken into account.
@@ -9595,7 +9694,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_key_mgr.get.return_value = mock_key
mock_key.get_encoded.return_value = key_encoded
- with mock.patch.object(drvr, '_is_luks_v1', return_value=True):
+ with mock.patch.object(drvr, '_allow_native_luksv1',
+ return_value=True):
with mock.patch.object(drvr._host, 'create_secret') as crt_scrt:
drvr._attach_encryptor(self.context, connection_info,
encryption)
@@ -9657,9 +9757,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._allow_native_luksv1')
def test_detach_encryptor_encrypted_volume_meta_missing(self,
- mock_is_luks_v1, mock_get_encryptor, mock_get_metadata):
+ mock_allow_native_luksv1, mock_get_encryptor, mock_get_metadata):
"""Assert that if missing the encryption metadata of an encrypted
volume is fetched and then used to detach the encryptor for the volume.
"""
@@ -9669,7 +9769,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
encryption = {'provider': 'luks', 'control_location': 'front-end'}
mock_get_metadata.return_value = encryption
connection_info = {'data': {'volume_id': uuids.volume_id}}
- mock_is_luks_v1.return_value = False
+ mock_allow_native_luksv1.return_value = False
drvr._detach_encryptor(self.context, connection_info, None)
@@ -9681,9 +9781,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._allow_native_luksv1')
def test_detach_encryptor_encrypted_volume_meta_provided(self,
- mock_is_luks_v1, mock_get_encryptor, mock_get_metadata):
+ mock_allow_native_luksv1, mock_get_encryptor, mock_get_metadata):
"""Assert that when provided there are no further attempts to fetch the
encryption metadata for the volume and that the provided metadata is
then used to detach the volume.
@@ -9693,7 +9793,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_encryptor.return_value = mock_encryptor
encryption = {'provider': 'luks', 'control_location': 'front-end'}
connection_info = {'data': {'volume_id': uuids.volume_id}}
- mock_is_luks_v1.return_value = False
+ mock_allow_native_luksv1.return_value = False
drvr._detach_encryptor(self.context, connection_info, encryption)
@@ -9703,10 +9803,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_encryptor.detach_volume.assert_called_once_with(**encryption)
@mock.patch('nova.virt.libvirt.host.Host.find_secret')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._allow_native_luksv1')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_detach_encryptor_native_luks_device_path_secret_missing(self,
- mock_get_encryptor, mock_is_luks_v1, mock_find_secret):
+ mock_get_encryptor, mock_allow_native_luksv1, mock_find_secret):
"""Assert that the encryptor is not built when native LUKS is
available, the associated volume secret is missing and device_path is
also missing from the connection_info.
@@ -9716,28 +9816,37 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'encryption_key_id': uuids.encryption_key_id}
connection_info = {'data': {'volume_id': uuids.volume_id}}
mock_find_secret.return_value = False
- mock_is_luks_v1.return_value = True
+ mock_allow_native_luksv1.return_value = True
drvr._detach_encryptor(self.context, connection_info, encryption)
mock_find_secret.assert_called_once_with('volume', uuids.volume_id)
mock_get_encryptor.assert_not_called()
- def test_is_luks_v1(self):
+ def test_allow_native_luksv1(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertFalse(drvr._is_luks_v1({}))
- self.assertFalse(drvr._is_luks_v1({
+ self.assertFalse(drvr._allow_native_luksv1({}))
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor'
}))
- self.assertFalse(drvr._is_luks_v1({
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': 'CryptSetupEncryptor'}))
- self.assertFalse(drvr._is_luks_v1({
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': encryptors.PLAIN}))
- self.assertTrue(drvr._is_luks_v1({
+ self.assertTrue(drvr._allow_native_luksv1({
+ 'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
+ self.assertTrue(drvr._allow_native_luksv1({
+ 'provider': 'LuksEncryptor'}))
+ self.assertTrue(drvr._allow_native_luksv1({
+ 'provider': encryptors.LUKS}))
+
+ # Assert that _allow_native_luksv1 always returns False when the
+ # disable_native_luksv1 workaround is enabled
+ self.flags(disable_native_luksv1=True, group='workarounds')
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
- self.assertTrue(drvr._is_luks_v1({
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': 'LuksEncryptor'}))
- self.assertTrue(drvr._is_luks_v1({
+ self.assertFalse(drvr._allow_native_luksv1({
'provider': encryptors.LUKS}))
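The renamed test now spells out the full truth table for _allow_native_luksv1: the provider must be one of the known LUKSv1 encryptor names and the workaround must be off. A standalone approximation (encryptors.LUKS resolves to 'luks' in os-brick at the time of writing):

    NATIVE_LUKSV1_PROVIDERS = (
        'luks',
        'LuksEncryptor',
        'nova.volume.encryptors.luks.LuksEncryptor',
    )

    def allow_native_luksv1(encryption, disable_native_luksv1=False):
        """True only for LUKSv1 providers with the workaround off."""
        if disable_native_luksv1:
            return False
        provider = (encryption or {}).get('provider')
        return provider in NATIVE_LUKSV1_PROVIDERS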
def test_multi_nic(self):
@@ -18957,11 +19066,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
save.assert_called_once_with()
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_is_luks_v1')
- def test_swap_volume_native_luks_blocked(self, mock_is_luks_v1,
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
+ def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
mock_get_encryption):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- mock_is_luks_v1.return_value = True
+ mock_allow_native_luksv1.return_value = True
# dest volume is encrypted
mock_get_encryption.side_effect = [{}, {'provider': 'luks'}]
@@ -19590,13 +19699,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
if rescue:
rescue_data = ct_instance
+ disk_info = {'mapping': {'root': {'dev': 'hda'},
+ 'disk.rescue': {'dev': 'hda'}}}
else:
rescue_data = None
+ disk_info = {'mapping': {'disk': {}}}
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
- image_meta, {'mapping': {'disk': {}}},
- rescue_data)
+ image_meta, disk_info, rescue_data)
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
@@ -19795,10 +19906,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os.path.exists')
@mock.patch('os.path.getsize')
@mock.patch('os.path.isdir')
- @mock.patch('oslo_concurrency.processutils.execute')
+ @mock.patch('nova.virt.images.qemu_img_info')
@mock.patch.object(host.Host, '_get_domain')
def test_get_instance_disk_info_parallels_ct(self, mock_get_domain,
- mock_execute,
+ mock_qemu_img_info,
mock_isdir,
mock_getsize,
mock_exists,
@@ -19813,10 +19924,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"<target dir='/'/></filesystem>"
"</devices></domain>")
- ret = ("image: /test/disk/root.hds\n"
- "file format: parallels\n"
- "virtual size: 20G (21474836480 bytes)\n"
- "disk size: 789M\n")
+ mock_qemu_img_info.return_value = mock.Mock(
+ virtual_size=21474836480, image="/test/disk/root.hds",
+ file_format="ploop", size=827327254, backing_file=None)
self.flags(virt_type='parallels', group='libvirt')
instance = objects.Instance(**self.test_instance)
@@ -19836,7 +19946,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_getsize.side_effect = getsize_sideeffect
mock_exists.return_value = True
mock_isdir.return_value = True
- mock_execute.return_value = (ret, '')
info = drvr.get_instance_disk_info(instance)
info = jsonutils.loads(info)
@@ -21388,6 +21497,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_rename.assert_has_calls([
mock.call(_path_qcow, path)])
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_allocate_mdevs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -21408,6 +21518,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_raw_to_qcow2,
mock_create_domain_and_network,
mock_get_info, mock_inject_data,
+ mock_alloc_mdevs,
power_on=True, resize_instance=False):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration.
@@ -21418,6 +21529,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
else:
state = power_state.SHUTDOWN
mock_get_info.return_value = hardware.InstanceInfo(state=state)
+ mock_alloc_mdevs.return_value = []
instance = self._create_instance(
{'config_drive': str(True),
@@ -21443,7 +21555,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.finish_migration(
context.get_admin_context(), migration, instance,
disk_info_text, [], image_meta,
- resize_instance, bdi, power_on)
+ resize_instance, mock.ANY, bdi, power_on)
# Assert that we converted the root, ephemeral, and swap disks
instance_path = libvirt_utils.get_instance_path(instance)
@@ -21478,6 +21590,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# We shouldn't be injecting data during migration
self.assertFalse(mock_inject_data.called)
+ mock_alloc_mdevs.assert_called_once_with(mock.ANY)
# NOTE(mdbooth): If we wanted to check the generated xml, we could
# insert a hook here
mock_create_domain_and_network.assert_called_once_with(
@@ -21533,7 +21646,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def fake_to_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
- block_device_info=None):
+ block_device_info=None, mdevs=None):
return ""
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
@@ -21573,11 +21686,15 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.events_passed_to_fake_create = [
('network-vif-plugged', uuids.normal_vif)]
- self.drvr.finish_revert_migration(
- context.get_admin_context(), ins_ref, network_info, migration,
- None, power_on)
+ with mock.patch.object(
+ self.drvr, '_get_all_assigned_mediated_devices',
+ return_value={}) as mock_get_a_mdevs:
+ self.drvr.finish_revert_migration(
+ context.get_admin_context(), ins_ref, network_info,
+ migration, None, power_on)
self.assertTrue(self.fake_create_domain_called)
+ mock_get_a_mdevs.assert_called_once_with(mock.ANY)
def test_finish_revert_migration_power_on(self):
migration = objects.Migration(id=42, source_compute='fake-host1',
@@ -21618,8 +21735,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock.patch.object(drvr, '_get_guest_xml'),
mock.patch.object(shutil, 'rmtree'),
mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'),
+ mock.patch.object(drvr, '_get_all_assigned_mediated_devices',
+ return_value={}),
) as (mock_stat, mock_path, mock_rename, mock_cdn, mock_ggx,
- mock_rmtree, mock_looping_call):
+ mock_rmtree, mock_looping_call, mock_get_a_mdevs):
mock_path.return_value = '/fake/foo'
if del_inst_failed:
mock_rmtree.side_effect = OSError(errno.ENOENT,
@@ -21649,7 +21768,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test_finish_revert_migration_preserves_disk_bus(self):
def fake_get_guest_xml(context, instance, network_info, disk_info,
- image_meta, block_device_info=None):
+ image_meta, block_device_info=None, mdevs=None):
self.assertEqual('ide', disk_info['disk_bus'])
image_meta = {"disk_format": "raw",
@@ -21667,7 +21786,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
return_value=image_meta),
mock.patch.object(drvr, '_get_guest_xml',
side_effect=fake_get_guest_xml),
- ) as (mock_img_bkend, mock_cdan, mock_gifsm, mock_ggxml):
+ mock.patch.object(drvr, '_get_all_assigned_mediated_devices',
+ return_value={}),
+ ) as (mock_img_bkend, mock_cdan, mock_gifsm, mock_ggxml,
+ mock_get_a_mdevs):
drvr.finish_revert_migration('', instance,
network_model.NetworkInfo(),
migration, power_on=False)
@@ -21683,8 +21805,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
with test.nested(
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
- mock.patch.object(drvr, '_get_guest_xml')) as (
- mock_image, mock_cdn, mock_ggx):
+ mock.patch.object(drvr, '_get_guest_xml'),
+ mock.patch.object(drvr, '_get_all_assigned_mediated_devices'),
+ ) as (mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs):
mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref,
network_model.NetworkInfo(),
@@ -21706,8 +21829,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
with test.nested(
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
- mock.patch.object(drvr, '_get_guest_xml')) as (
- mock_image, mock_cdn, mock_ggx):
+ mock.patch.object(drvr, '_get_guest_xml'),
+ mock.patch.object(drvr, '_get_all_assigned_mediated_devices'),
+ ) as (mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs):
mock_image.return_value = {'disk_format': 'raw'}
drvr.image_backend.rollback_to_snap.side_effect = (
exception.SnapshotNotFound(snapshot_id='testing'))
@@ -21729,8 +21853,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock.patch.object(rbd_utils, 'RBDDriver'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
- mock.patch.object(drvr, '_get_guest_xml')) as (
- mock_rbd, mock_image, mock_cdn, mock_ggx):
+ mock.patch.object(drvr, '_get_guest_xml'),
+ mock.patch.object(drvr, '_get_all_assigned_mediated_devices'),
+ ) as (mock_rbd, mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs):
mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref,
network_model.NetworkInfo(),
@@ -22532,6 +22657,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_detach.assert_called_once_with(expected.to_xml(),
flags=expected_flags)
+ @mock.patch('nova.objects.block_device.BlockDeviceMapping.save',
+ new=mock.Mock())
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@@ -22541,13 +22669,12 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch('nova.privsep.utils.supports_direct_io')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
- def _test_rescue(self, instance,
- mock_instance_metadata, mock_supports_direct_io,
- mock_build_device_metadata, mock_set_host_enabled,
- mock_write_to_file,
- mock_get_mdev,
- image_meta_dict=None,
- exists=None):
+ def _test_rescue(self, instance, mock_instance_metadata,
+ mock_supports_direct_io, mock_build_device_metadata,
+ mock_set_host_enabled, mock_write_to_file, mock_get_mdev,
+ mock_get_image_meta_by_ref, image_meta_dict=None, exists=None,
+ instance_image_meta_dict=None, block_device_info=None):
+
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
mock_build_device_metadata.return_value = None
mock_supports_direct_io.return_value = True
@@ -22561,6 +22688,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
image_meta = objects.ImageMeta.from_dict(image_meta_dict)
+ if instance_image_meta_dict:
+ meta = objects.ImageMeta.from_dict(instance_image_meta_dict)
+ mock_get_image_meta_by_ref.return_value = meta
+
network_info = _fake_network_info(self)
rescue_password = 'fake_password'
@@ -22572,11 +22703,15 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
if post_xml_callback is not None:
post_xml_callback()
- with mock.patch.object(
- self.drvr, '_create_domain',
- side_effect=fake_create_domain) as mock_create_domain:
+ with test.nested(
+ mock.patch.object(self.drvr, '_create_domain',
+ side_effect=fake_create_domain),
+ mock.patch.object(self.drvr, '_connect_volume'),
+ ) as (mock_create_domain, mock_connect_volume):
+
self.drvr.rescue(self.context, instance,
- network_info, image_meta, rescue_password, None)
+ network_info, image_meta, rescue_password,
+ block_device_info)
self.assertTrue(mock_create_domain.called)
@@ -22693,6 +22828,259 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual(expected_kernel_ramdisk_paths,
kernel_ramdisk_paths)
+ @mock.patch('nova.virt.libvirt.utils.write_to_file')
+ def test_rescue_stable_device_unsupported_virt_types(self,
+ mock_libvirt_write_to_file):
+ network_info = _fake_network_info(self, 1)
+ instance = self._create_instance({'config_drive': str(True)})
+ rescue_image_meta_dict = {'id': uuids.rescue_image_id,
+ 'name': 'rescue',
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}}
+ rescue_image_meta = objects.ImageMeta.from_dict(rescue_image_meta_dict)
+
+ # Assert that InstanceNotRescuable is raised for xen and lxc virt_types
+ self.flags(virt_type='xen', group='libvirt')
+ self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue,
+ self.context, instance, network_info,
+ rescue_image_meta, None, None)
+
+ self.flags(virt_type='lxc', group='libvirt')
+ self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue,
+ self.context, instance, network_info,
+ rescue_image_meta, None, None)
+
+ def test_rescue_stable_device(self):
+ # Assert the imagebackend behaviour and domain device layout
+ instance = self._create_instance({'config_drive': str(True)})
+ inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
+ rescue_image_meta_dict = {'id': uuids.rescue_image_id,
+ 'name': 'rescue',
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}}
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'ephemerals': [
+ {'guest_format': None,
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'size': 20,
+ 'device_type': 'disk'}],
+ 'swap': None,
+ 'block_device_mapping': None}
+
+ backend, domain = self._test_rescue(
+ instance,
+ image_meta_dict=rescue_image_meta_dict,
+ instance_image_meta_dict=inst_image_meta_dict,
+ block_device_info=block_device_info)
+
+ # Assert that we created the expected set of disks, and no others
+ self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
+ sorted(backend.created_disks.keys()))
+
+ # Assert that the original disks are presented first with the rescue
+ # disk attached as the final device in the domain.
+ expected_disk_paths = [backend.disks[name].path for name
+ in ('disk', 'disk.eph0', 'disk.config',
+ 'disk.rescue')]
+ disk_paths = domain.xpath('devices/disk/source/@file')
+ self.assertEqual(expected_disk_paths, disk_paths)
+
+ # Assert that the disk.rescue device has a boot order of 1
+ disk_path = backend.disks['disk.rescue'].path
+ query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path
+ self.assertEqual('1', domain.xpath(query)[0])
+
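The assertions above lean on two XPath idioms: an attribute union that collects file- and block-backed disk sources in device order, and a predicate that fetches boot/@order for one specific disk. A toy reproduction against a hypothetical domain document (lxml, since ElementTree lacks full XPath support):

    from lxml import etree

    domain = etree.fromstring("""
    <domain>
      <devices>
        <disk><source file='/var/lib/nova/disk'/></disk>
        <disk><source dev='/dev/sdb'/></disk>
        <disk><source file='/var/lib/nova/disk.rescue'/>
              <boot order='1'/></disk>
      </devices>
    </domain>
    """)

    # Attribute union: file-backed and block-backed sources, in order.
    paths = domain.xpath(
        'devices/disk/source/@*[name()="file" or name()="dev"]')
    assert paths == [
        '/var/lib/nova/disk', '/dev/sdb', '/var/lib/nova/disk.rescue']

    # boot/@order of the rescue disk only.
    query = "devices/disk[source/@file = '%s']/boot/@order"
    assert domain.xpath(query % '/var/lib/nova/disk.rescue') == ['1']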
+ def test_rescue_stable_device_with_volume_attached(self):
+ # Assert the imagebackend behaviour and domain device layout
+ instance = self._create_instance({'config_drive': str(True)})
+ inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
+ rescue_image_meta_dict = {'id': uuids.rescue_image_id,
+ 'name': 'rescue',
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}}
+ conn_info = {'driver_volume_type': 'iscsi',
+ 'data': {'device_path': '/dev/sdb'}}
+ bdm = objects.BlockDeviceMapping(
+ self.context,
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vdd'}))
+ bdms = driver_block_device.convert_volumes([bdm])
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'ephemerals': [
+ {'guest_format': None,
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'size': 20,
+ 'device_type': 'disk'}],
+ 'swap': None,
+ 'block_device_mapping': bdms}
+ bdm = block_device_info['block_device_mapping'][0]
+ bdm['connection_info'] = conn_info
+
+ backend, domain = self._test_rescue(
+ instance,
+ image_meta_dict=rescue_image_meta_dict,
+ instance_image_meta_dict=inst_image_meta_dict,
+ block_device_info=block_device_info)
+
+ # Assert that we created the expected set of disks, and no others
+ self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
+ sorted(backend.created_disks.keys()))
+
+ # Assert that the original disks are presented first with the rescue
+ # disk attached as the final device in the domain.
+ expected_disk_paths = [
+ backend.disks['disk'].path, backend.disks['disk.eph0'].path,
+ backend.disks['disk.config'].path, '/dev/sdb',
+ backend.disks['disk.rescue'].path]
+ query = 'devices/disk/source/@*[name()="file" or name()="dev"]'
+ disk_paths = domain.xpath(query)
+ self.assertEqual(expected_disk_paths, disk_paths)
+
+ # Assert that the disk.rescue device has a boot order of 1
+ disk_path = backend.disks['disk.rescue'].path
+ query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path
+ self.assertEqual('1', domain.xpath(query)[0])
+
+ def test_supports_bfv_rescue_capability(self):
+ """Assert that the supports_bfv_rescue capability is set"""
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertTrue(drvr.capabilities.get('supports_bfv_rescue'))
+
+ def test_rescue_stable_device_bfv_without_instance_image_ref(self):
+ """Assert that image_meta is fetched from the bdms for bfv instances"""
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Set instance.image_ref to None for this BFV instance
+ instance = self._create_instance({'config_drive': str(True)})
+ instance.image_ref = None
+
+ rescue_image_meta = objects.ImageMeta.from_dict(
+ {'id': uuids.rescue_image_id,
+ 'name': 'rescue',
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}})
+ bdm = objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1,
+ 'image_id': uuids.bdm_image_id,
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'boot_index': 0}))
+ bdms = driver_block_device.convert_images([bdm])
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'ephemerals': [],
+ 'swap': None,
+ 'block_device_mapping': bdms}
+ network_info = _fake_network_info(self)
+ disk_info = {'mapping': {}}
+
+ with test.nested(
+ mock.patch.object(drvr, '_create_domain'),
+ mock.patch.object(drvr, '_destroy'),
+ mock.patch.object(drvr, '_get_guest_xml'),
+ mock.patch.object(drvr, '_create_image'),
+ mock.patch.object(drvr, '_get_existing_domain_xml'),
+ mock.patch.object(libvirt_utils, 'write_to_file'),
+ mock.patch.object(libvirt_utils, 'get_instance_path'),
+ mock.patch('nova.virt.libvirt.blockinfo.get_disk_info'),
+ mock.patch('nova.image.glance.API.get'),
+ mock.patch('nova.objects.image_meta.ImageMeta.from_dict')
+ ) as (mock_create, mock_destroy, mock_get_guest_xml, mock_create_image,
+ mock_get_existing_xml, mock_write, mock_inst_path,
+ mock_get_disk_info, mock_image_get, mock_from_dict):
+
+ self.flags(virt_type='kvm', group='libvirt')
+ mock_image_get.return_value = mock.sentinel.bdm_image_meta_dict
+ mock_from_dict.return_value = mock.sentinel.bdm_image_meta
+ mock_get_disk_info.return_value = disk_info
+
+ drvr.rescue(self.context, instance, network_info,
+ rescue_image_meta, mock.sentinel.rescue_password,
+ block_device_info)
+
+ # Assert that we fetch image metadata from Glance using the image
+ # uuid stashed in the BDM and build an image_meta object using the
+ # returned dict.
+ mock_image_get.assert_called_once_with(
+ self.context, uuids.bdm_image_id)
+ mock_from_dict.assert_called_once_with(
+ mock.sentinel.bdm_image_meta_dict)
+
+ # Assert that get_disk_info is then called using this object
+ mock_get_disk_info.assert_called_once_with(
+ 'kvm', instance, mock.sentinel.bdm_image_meta, rescue=True,
+ block_device_info=block_device_info,
+ rescue_image_meta=rescue_image_meta)
+
+ # Assert that this object is also used when building guest XML
+ mock_get_guest_xml.assert_called_once_with(
+ self.context, instance, network_info, disk_info,
+ mock.sentinel.bdm_image_meta, rescue=mock.ANY, mdevs=mock.ANY,
+ block_device_info=block_device_info)
+
+ def test_rescue_stable_device_bfv(self):
+ """Assert the disk layout when rescuing BFV instances"""
+
+ # NOTE(lyarwood): instance.image_ref is left in place here to allow us
+ # to reuse the _test_rescue test method as we only care about the
+ # eventual disk layout and not how we get the image_meta in this test.
+ instance = self._create_instance({'config_drive': str(True)})
+
+ # Set ephemeral_gb to 0 to prevent any disk.local disks from being used
+ instance.ephemeral_gb = 0
+ inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
+ rescue_image_meta_dict = {
+ 'id': uuids.rescue_image_id,
+ 'name': 'rescue',
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}}
+ conn_info = {
+ 'driver_volume_type': 'iscsi',
+ 'data': {'device_path': '/dev/sdb'}}
+ bdm = objects.BlockDeviceMapping(
+ self.context,
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda'}))
+ bdms = driver_block_device.convert_volumes([bdm])
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'ephemerals': [],
+ 'swap': None,
+ 'block_device_mapping': bdms}
+ bdm = block_device_info['block_device_mapping'][0]
+ bdm['connection_info'] = conn_info
+
+ backend, domain = self._test_rescue(
+ instance,
+ image_meta_dict=rescue_image_meta_dict,
+ instance_image_meta_dict=inst_image_meta_dict,
+ block_device_info=block_device_info)
+
+ # Assert that we created the expected set of disks, and no others
+ self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
+ sorted(backend.created_disks.keys()))
+
+ # Assert that the original disks are presented first with the rescue
+ # disk attached as the final device in the domain.
+ expected_disk_paths = [backend.disks['disk.config'].path,
+ '/dev/sdb', backend.disks['disk.rescue'].path]
+ query = 'devices/disk/source/@*[name()="file" or name()="dev"]'
+ disk_paths = domain.xpath(query)
+ self.assertEqual(expected_disk_paths, disk_paths)
+
+ # Assert that the disk.rescue device has a boot order of 1
+ disk_path = backend.disks['disk.rescue'].path
+ query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path
+ self.assertEqual('1', domain.xpath(query)[0])
+
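The BFV variants trace the image_meta lookup order asserted in test_rescue_stable_device_bfv_without_instance_image_ref: when instance.image_ref is unset, the image id stashed on the boot BDM is resolved through Glance and wrapped in an ImageMeta before the disk layout is computed. A hedged sketch with dict-style BDMs (the real code operates on BlockDeviceMapping objects, and ImageMeta.from_image_ref may take different arguments):

    def get_rescue_image_meta(context, instance, bdms, image_api, meta_cls):
        """Resolve instance image_meta, falling back to the boot BDM."""
        if instance.image_ref:
            return meta_cls.from_image_ref(context, image_api,
                                           instance.image_ref)
        # Boot-from-volume: use the image id stashed on the boot disk.
        boot_bdm = next(b for b in bdms if b.get('boot_index') == 0)
        bdm_image = image_api.get(context, boot_bdm['image_id'])
        return meta_cls.from_dict(bdm_image)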
@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, '_get_domain')
@@ -23988,7 +24376,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
"""Test getting storage bus traits per virt type."""
all_traits = set(ot.get_traits('COMPUTE_STORAGE_BUS_'))
# ensure each virt type reports the correct bus types
- for virt_type, buses in blockinfo.SUPPORTED_STORAGE_BUSES.items():
+ for virt_type, buses in blockinfo.SUPPORTED_DEVICE_BUSES.items():
if virt_type in ('qemu', 'kvm'):
continue
@@ -25005,8 +25393,6 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
mock_xml, mock_rebase, mock_commit):
"""Deleting newest snapshot -- blockRebase."""
- # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
- fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
@@ -25025,9 +25411,10 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
self.assertEqual(2, mock_is_job_complete.call_count)
mock_xml.assert_called_once_with(flags=0)
mock_get.assert_called_once_with(instance)
- mock_rebase.assert_called_once_with('vda', 'snap.img', 0, flags=0)
+ mock_rebase.assert_called_once_with(
+ 'vda', 'snap.img', 0,
+ flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
mock_commit.assert_not_called()
- fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
@mock.patch('time.sleep', new=mock.Mock())
@mock.patch.object(FakeVirtDomain, 'blockCommit')
@@ -25331,8 +25718,6 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
def XMLDesc(self, flags):
return self.dom_netdisk_xml
- # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
- fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
@@ -25357,11 +25742,10 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
self.assertEqual(2, mock_is_job_complete.call_count)
mock_xml.assert_called_once_with(flags=0)
mock_get.assert_called_once_with(instance)
- mock_rebase.assert_called_once_with('vdb', 'vdb[1]', 0, flags=0)
+ mock_rebase.assert_called_once_with('vdb', 'vdb[1]', 0,
+ flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
mock_commit.assert_not_called()
- fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
-
@mock.patch('time.sleep', new=mock.Mock())
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 97b1386145..c53dfd2393 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -37,7 +37,6 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.libvirt import fakelibvirt
-from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import utils as libvirt_utils
@@ -93,244 +92,6 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path)
self.assertEqual('ploop', d_type)
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_disk_backing(self, mock_execute, mock_exists):
- path = '/myhome/disk.config'
- template_output = """image: %(path)s
-file format: raw
-virtual size: 2K (2048 bytes)
-cluster_size: 65536
-disk size: 96K
-"""
- output = template_output % ({
- 'path': path,
- })
- mock_execute.return_value = (output, '')
- d_backing = libvirt_utils.get_disk_backing_file(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertIsNone(d_backing)
-
- def _test_disk_size(self, mock_execute, path, expected_size):
- d_size = libvirt_utils.get_disk_size(path)
- self.assertEqual(expected_size, d_size)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
-
- @mock.patch('os.path.exists', return_value=True)
- def test_disk_size(self, mock_exists):
- path = '/myhome/disk.config'
- template_output = """image: %(path)s
-file format: raw
-virtual size: %(v_size)s (%(vsize_b)s bytes)
-cluster_size: 65536
-disk size: 96K
-"""
- for i in range(0, 128):
- bytes = i * 65336
- kbytes = bytes / 1024
- mbytes = kbytes / 1024
- output = template_output % ({
- 'v_size': "%sM" % (mbytes),
- 'vsize_b': i,
- 'path': path,
- })
- with mock.patch('oslo_concurrency.processutils.execute',
- return_value=(output, '')) as mock_execute:
- self._test_disk_size(mock_execute, path, i)
- output = template_output % ({
- 'v_size': "%sK" % (kbytes),
- 'vsize_b': i,
- 'path': path,
- })
- with mock.patch('oslo_concurrency.processutils.execute',
- return_value=(output, '')) as mock_execute:
- self._test_disk_size(mock_execute, path, i)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_img_info_json(self, mock_execute, mock_exists):
- path = "disk.config"
- example_output = """{
- "virtual-size": 67108864,
- "filename": "disk.config",
- "cluster-size": 65536,
- "format": "raw",
- "actual-size": 98304
-}
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path, output_format='json')
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', '--output=json',
- prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('raw', image_info.file_format)
- self.assertEqual(67108864, image_info.virtual_size)
- self.assertEqual(98304, image_info.disk_size)
- self.assertEqual(65536, image_info.cluster_size)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_info_canon(self, mock_execute, mock_exists):
- path = "disk.config"
- example_output = """image: disk.config
-file format: raw
-virtual size: 64M (67108864 bytes)
-cluster_size: 65536
-disk size: 96K
-blah BLAH: bb
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('raw', image_info.file_format)
- self.assertEqual(67108864, image_info.virtual_size)
- self.assertEqual(98304, image_info.disk_size)
- self.assertEqual(65536, image_info.cluster_size)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_info_canon2(self, mock_execute, mock_exists):
- path = "disk.config"
- example_output = """image: disk.config
-file format: QCOW2
-virtual size: 67108844
-cluster_size: 65536
-disk size: 963434
-backing file: /var/lib/nova/a328c7998805951a_2
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('qcow2', image_info.file_format)
- self.assertEqual(67108844, image_info.virtual_size)
- self.assertEqual(963434, image_info.disk_size)
- self.assertEqual(65536, image_info.cluster_size)
- self.assertEqual('/var/lib/nova/a328c7998805951a_2',
- image_info.backing_file)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_info_ploop(self, mock_execute, mock_isdir, mock_exists):
- path = "/var/lib/nova"
- example_output = """image: root.hds
-file format: parallels
-virtual size: 3.0G (3221225472 bytes)
-disk size: 706M
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
- os.path.join(path, 'root.hds'), '--force-share',
- prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_isdir.assert_called_once_with(path)
- self.assertEqual(2, mock_exists.call_count)
- self.assertEqual(path, mock_exists.call_args_list[0][0][0])
- self.assertEqual(os.path.join(path, 'DiskDescriptor.xml'),
- mock_exists.call_args_list[1][0][0])
- self.assertEqual('root.hds', image_info.image)
- self.assertEqual('parallels', image_info.file_format)
- self.assertEqual(3221225472, image_info.virtual_size)
- self.assertEqual(740294656, image_info.disk_size)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_backing_file_actual(self,
- mock_execute, mock_exists):
- path = "disk.config"
- example_output = """image: disk.config
-file format: raw
-virtual size: 64M (67108864 bytes)
-cluster_size: 65536
-disk size: 96K
-Snapshot list:
-ID TAG VM SIZE DATE VM CLOCK
-1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('raw', image_info.file_format)
- self.assertEqual(67108864, image_info.virtual_size)
- self.assertEqual(98304, image_info.disk_size)
- self.assertEqual(1, len(image_info.snapshots))
- self.assertEqual('/b/3a988059e51a_2',
- image_info.backing_file)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_info_convert(self, mock_execute, mock_exists):
- path = "disk.config"
- example_output = """image: disk.config
-file format: raw
-virtual size: 64M
-disk size: 96K
-Snapshot list:
-ID TAG VM SIZE DATE VM CLOCK
-1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-junk stuff: bbb
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('raw', image_info.file_format)
- self.assertEqual(67108864, image_info.virtual_size)
- self.assertEqual(98304, image_info.disk_size)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_qemu_info_snaps(self, mock_execute, mock_exists):
- path = "disk.config"
- example_output = """image: disk.config
-file format: raw
-virtual size: 64M (67108864 bytes)
-disk size: 96K
-Snapshot list:
-ID TAG VM SIZE DATE VM CLOCK
-1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
-"""
- mock_execute.return_value = (example_output, '')
- image_info = images.qemu_img_info(path)
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
- self.assertEqual('disk.config', image_info.image)
- self.assertEqual('raw', image_info.file_format)
- self.assertEqual(67108864, image_info.virtual_size)
- self.assertEqual(98304, image_info.disk_size)
- self.assertEqual(3, len(image_info.snapshots))
-
def test_valid_hostname_normal(self):
self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
@@ -467,22 +228,6 @@ ID TAG VM SIZE DATE VM CLOCK
libvirt_utils.pick_disk_driver_name(version))
mock_execute.reset_mock()
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_get_disk_size(self, mock_execute, mock_exists):
- path = '/some/path'
- example_output = """image: 00000001
-file format: raw
-virtual size: 4.4M (4592640 bytes)
-disk size: 4.4M
-"""
- mock_execute.return_value = (example_output, '')
- self.assertEqual(4592640, disk.get_disk_size('/some/path'))
- mock_execute.assert_called_once_with(
- 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
- '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
- mock_exists.assert_called_once_with(path)
-
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
@@ -741,31 +486,6 @@ disk size: 4.4M
del self.executes
- def test_get_disk_backing_file(self):
- with_actual_path = False
-
- def fake_execute(*args, **kwargs):
- if with_actual_path:
- return ("some: output\n"
- "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
- "...: ...\n"), ''
- else:
- return ("some: output\n"
- "backing file: /foo/bar/baz\n"
- "...: ...\n"), ''
-
- def return_true(*args, **kwargs):
- return True
-
- self.stub_out('oslo_concurrency.processutils.execute', fake_execute)
- self.stub_out('os.path.exists', return_true)
-
- out = libvirt_utils.get_disk_backing_file('')
- self.assertEqual(out, 'baz')
- with_actual_path = True
- out = libvirt_utils.get_disk_backing_file('')
- self.assertEqual(out, 'c')
-
def test_get_instance_path_at_destination(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid=uuids.instance)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_net.py b/nova/tests/unit/virt/libvirt/volume/test_net.py
index 0332afc3c2..25e0af7f39 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_net.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_net.py
@@ -221,7 +221,9 @@ class LibvirtNetVolumeDriverTestCase(
def test_extend_volume(self):
device_path = '/dev/fake-dev'
- connection_info = {'data': {'device_path': device_path}}
+ connection_info = {
+ 'driver_volume_type': 'net',
+ 'data': {'device_path': device_path}}
requested_size = 20 * pow(1024, 3) # 20GiB
@@ -231,3 +233,49 @@ class LibvirtNetVolumeDriverTestCase(
requested_size)
self.assertEqual(requested_size, new_size)
+
+ def test_libvirt_rbd_driver_block_connect(self):
+ self.flags(rbd_volume_local_attach=True, group='workarounds')
+ connection_info = self.rbd_connection(self.vol)
+ libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_host)
+ libvirt_driver.connector.connect_volume = mock.MagicMock(
+ return_value={'path': mock.sentinel.rbd_dev})
+ libvirt_driver.connect_volume(connection_info, mock.sentinel.instance)
+
+ # Assert that the connector is called correctly and device_path is updated
+ libvirt_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+
+ def test_libvirt_rbd_driver_block_disconnect(self):
+ self.flags(rbd_volume_local_attach=True, group='workarounds')
+ connection_info = self.rbd_connection(self.vol)
+ libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_host)
+ libvirt_driver.connector.disconnect_volume = mock.MagicMock()
+ libvirt_driver.disconnect_volume(connection_info,
+ mock.sentinel.instance)
+
+ # Assert that the connector is called correctly
+ libvirt_driver.connector.disconnect_volume.assert_called_once_with(
+ connection_info['data'], None)
+
+ def test_libvirt_rbd_driver_block_config(self):
+ self.flags(rbd_volume_local_attach=True, group='workarounds')
+ connection_info = self.rbd_connection(self.vol)
+ connection_info['data']['device_path'] = '/dev/rbd0'
+ libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_host)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+
+ # Assert that the returned config is for a RBD block device
+ self.assertEqual('block', conf.source_type)
+ self.assertEqual('/dev/rbd0', conf.source_path)
+ self.assertEqual('native', conf.driver_io)
+
+ def test_libvirt_rbd_driver_block_extend(self):
+ self.flags(rbd_volume_local_attach=True, group='workarounds')
+ connection_info = self.rbd_connection(self.vol)
+ libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_host)
+
+ # Assert NotImplementedError is raised for extend_volume
+ self.assertRaises(NotImplementedError, libvirt_driver.extend_volume,
+ connection_info, mock.sentinel.instance,
+ mock.sentinel.requested_size)
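Read together, the four tests describe the [workarounds]rbd_volume_local_attach contract: connect/disconnect are proxied to an os-brick connector, get_config switches from a network disk to a local block device once device_path is populated, and extend is unsupported. A sketch of the get_config branch with an invented config class:

    class DiskConfig(object):
        """Toy stand-in for LibvirtConfigGuestDisk."""
        source_type = None
        source_path = None
        driver_io = None

    def get_rbd_config(connection_info, local_attach):
        conf = DiskConfig()
        if local_attach:
            # Attached on the host via krbd: expose the block device.
            conf.source_type = 'block'
            conf.source_path = connection_info['data']['device_path']
            conf.driver_io = 'native'
        else:
            # Default: QEMU talks to the Ceph cluster directly.
            conf.source_type = 'network'
        return conf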
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 95ccd7d2fe..141ef98f11 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -14,6 +14,7 @@
import collections
import copy
+import ddt
import mock
import testtools
@@ -4376,3 +4377,18 @@ class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
hw.get_pci_numa_policy_constraint, flavor, image_meta)
with testtools.ExpectedException(ValueError):
image_meta.properties.hw_pci_numa_affinity_policy = "fake"
+
+
+@ddt.ddt
+class RescuePropertyTestCase(test.NoDBTestCase):
+
+ @ddt.unpack
+ @ddt.data({'props': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'virtio'}, 'expected': True},
+ {'props': {'hw_rescue_device': 'disk'}, 'expected': True},
+ {'props': {'hw_rescue_bus': 'virtio'}, 'expected': True},
+ {'props': {'hw_disk_bus': 'virtio'}, 'expected': False})
+ def test_check_hw_rescue_props(self, props=None, expected=None):
+ meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
+ meta.properties = objects.ImageMetaProps.from_dict(props)
+ self.assertEqual(expected, hw.check_hw_rescue_props(meta))
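The ddt matrix fully determines the predicate: an image is stable-rescue capable as soon as it sets any hw_rescue_* property. A dict-based approximation (the real helper inspects ImageMetaProps attributes):

    def check_hw_rescue_props(image_props):
        """True when the image sets any hw_rescue_* property."""
        return any(key in image_props
                   for key in ('hw_rescue_device', 'hw_rescue_bus'))

    assert check_hw_rescue_props({'hw_rescue_bus': 'virtio'})
    assert not check_hw_rescue_props({'hw_disk_bus': 'virtio'})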
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 296aa24b09..199d4cf8e1 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -41,22 +41,20 @@ class QemuTestCase(test.NoDBTestCase):
'/fake/path')
@mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute',
- return_value=('stdout', None))
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info',
+ return_value={})
def test_qemu_info_with_no_errors(self, path_exists,
utils_execute):
image_info = images.qemu_img_info('/fake/path')
self.assertTrue(image_info)
- self.assertTrue(str(image_info))
- @mock.patch('oslo_concurrency.processutils.execute',
- return_value=('stdout', None))
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info',
+ return_value={})
def test_qemu_info_with_rbd_path(self, utils_execute):
# Assert that the use of an RBD URI as the path doesn't raise
# exception.DiskNotFound
image_info = images.qemu_img_info('rbd:volume/pool')
self.assertTrue(image_info)
- self.assertTrue(str(image_info))
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
index e47a6e97d4..f0538f6b51 100644
--- a/nova/tests/unit/virt/xenapi/test_xenapi.py
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -1852,7 +1852,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True,
- block_device_info=None, power_on=power_on)
+ allocations={}, block_device_info=None,
+ power_on=power_on)
self.assertTrue(self.called)
self.assertEqual(self.fake_vm_start_called, power_on)
@@ -1893,7 +1894,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
- block_device_info=None, power_on=power_on)
+ allocations={}, block_device_info=None,
+ power_on=power_on)
self.assertTrue(self.called)
self.assertEqual(self.fake_vm_start_called, power_on)
@@ -1923,7 +1925,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, image_meta, resize_instance=True)
+ network_info, image_meta, resize_instance=True,
+ allocations={})
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
@@ -1942,7 +1945,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
{'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
- network_info, image_meta, resize_instance=False)
+ network_info, image_meta, resize_instance=False,
+ allocations={})
@stub_vm_utils_with_vdi_attached
def test_migrate_too_many_partitions_no_resize_down(self):
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index bcf1637b9b..4ca8e4ee3e 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -14,6 +14,7 @@
# under the License.
from cinderclient import api_versions as cinder_api_versions
+from cinderclient import apiclient as cinder_apiclient
from cinderclient import exceptions as cinder_exception
from cinderclient.v2 import limits as cinder_limits
from keystoneauth1 import loading as ks_loading
@@ -546,6 +547,38 @@ class CinderApiTestCase(test.NoDBTestCase):
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_apiclient.exceptions.InternalServerError)
+ def test_attachment_delete_internal_server_error(self, mock_cinderclient):
+
+ self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
+ self.api.attachment_delete,
+ self.ctx, uuids.attachment_id)
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_internal_server_error_do_not_raise(
+ self, mock_cinderclient):
+ # generate exception, and then have a normal return on the next retry
+ mock_cinderclient.return_value.attachments.delete.side_effect = [
+ cinder_apiclient.exceptions.InternalServerError, None]
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_exception.BadRequest(code=400))
+ def test_attachment_delete_bad_request_exception(self, mock_cinderclient):
+
+ self.assertRaises(exception.InvalidInput,
+ self.api.attachment_delete,
+ self.ctx, uuids.attachment_id)
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_complete(self, mock_cinderclient):
mock_attachments = mock.MagicMock()
@@ -635,6 +668,38 @@ class CinderApiTestCase(test.NoDBTestCase):
mock_cinderclient.assert_called_with(self.ctx, microversion=None)
mock_volumes.detach.assert_called_once_with('id1', 'fakeid')
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_apiclient.exceptions.InternalServerError)
+ def test_detach_internal_server_error(self, mock_cinderclient):
+
+ self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
+ self.api.detach,
+ self.ctx, 'id1', instance_uuid='fake_uuid')
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_detach_internal_server_error_do_not_raise(
+ self, mock_cinderclient):
+ # generate exception, and then have a normal return on the next retry
+ mock_cinderclient.return_value.volumes.detach.side_effect = [
+ cinder_apiclient.exceptions.InternalServerError, None]
+
+ self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid',
+ attachment_id='fakeid')
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_exception.BadRequest(code=400))
+ def test_detach_bad_request_exception(self, mock_cinderclient):
+
+ self.assertRaises(exception.InvalidInput,
+ self.api.detach,
+ self.ctx, 'id1', instance_uuid='fake_uuid')
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_get(self, mock_cinderclient):
mock_attachment = mock.MagicMock()
@@ -754,6 +819,38 @@ class CinderApiTestCase(test.NoDBTestCase):
mock_volumes.terminate_connection.assert_called_once_with('id1',
'connector')
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_apiclient.exceptions.InternalServerError)
+ def test_terminate_connection_internal_server_error(
+ self, mock_cinderclient):
+ self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
+ self.api.terminate_connection,
+ self.ctx, 'id1', 'connector')
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_terminate_connection_internal_server_error_do_not_raise(
+ self, mock_cinderclient):
+ # generate exception, and then have a normal return on the next retry
+ mock_cinderclient.return_value.volumes.terminate_connection.\
+ side_effect = [cinder_apiclient.exceptions.InternalServerError,
+ None]
+
+ self.api.terminate_connection(self.ctx, 'id1', 'connector')
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient',
+ side_effect=cinder_exception.BadRequest(code=400))
+ def test_terminate_connection_bad_request_exception(
+ self, mock_cinderclient):
+ self.assertRaises(exception.InvalidInput,
+ self.api.terminate_connection,
+ self.ctx, 'id1', 'connector')
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
@mock.patch('nova.volume.cinder.cinderclient')
def test_delete(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
diff --git a/nova/utils.py b/nova/utils.py
index 50222092d2..87217abfa1 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -50,6 +50,7 @@ from oslo_utils import units
import six
from six.moves import range
+from nova import block_device
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LW
@@ -782,6 +783,61 @@ def get_image_from_system_metadata(system_meta):
return image_meta
+def get_bdm_image_metadata(context, image_api, volume_api,
+ block_device_mapping, legacy_bdm=True):
+ """Attempt to retrive image metadata from a given block_device_mapping.
+
+ If we are booting from a volume, we need to get the volume details from
+ Cinder and make sure we pass the metadata back accordingly.
+
+ :param context: request context
+ :param image_api: Image API
+ :param volume_api: Volume API
+ :param block_device_mapping: list of block device mappings for the
+ instance
+ :param legacy_bdm: True if block_device_mapping is in the legacy format
+ """
+ if not block_device_mapping:
+ return {}
+
+ for bdm in block_device_mapping:
+ if (legacy_bdm and
+ block_device.get_device_letter(
+ bdm.get('device_name', '')) != 'a'):
+ continue
+ elif not legacy_bdm and bdm.get('boot_index') != 0:
+ continue
+
+ volume_id = bdm.get('volume_id')
+ snapshot_id = bdm.get('snapshot_id')
+ if snapshot_id:
+ # NOTE(alaski): A volume snapshot inherits metadata from the
+ # originating volume, but the API does not expose metadata
+ # on the snapshot itself. So we query the volume for it below.
+ snapshot = volume_api.get_snapshot(context, snapshot_id)
+ volume_id = snapshot['volume_id']
+
+ if bdm.get('image_id'):
+ try:
+ image_id = bdm['image_id']
+ image_meta = image_api.get(context, image_id)
+ return image_meta
+ except Exception:
+ raise exception.InvalidBDMImage(id=image_id)
+ elif volume_id:
+ try:
+ volume = volume_api.get(context, volume_id)
+ except exception.CinderConnectionFailed:
+ raise
+ except Exception:
+ raise exception.InvalidBDMVolume(id=volume_id)
+
+ if not volume.get('bootable', True):
+ raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
+
+ return get_image_metadata_from_volume(volume)
+ return {}
+
+
def get_image_metadata_from_volume(volume):
properties = copy.copy(volume.get('volume_image_metadata', {}))
image_meta = {'properties': properties}
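
For orientation, a hedged usage sketch of the new helper follows; StubVolumeAPI is a hypothetical stand-in for cinder.API(), and a boot-index-0 volume BDM resolves to image metadata built from the volume:

    from nova import utils

    class StubVolumeAPI(object):
        """Hypothetical stub; real callers pass cinder.API()."""
        def get(self, context, volume_id):
            return {'id': volume_id, 'size': 1, 'bootable': True,
                    'volume_image_metadata': {'image_id': 'fake-image'}}

    bdms = [{'volume_id': 'vol-1', 'boot_index': 0}]
    image_meta = utils.get_bdm_image_metadata(
        None, image_api=None, volume_api=StubVolumeAPI(),
        block_device_mapping=bdms, legacy_bdm=False)
    # image_meta is built by get_image_metadata_from_volume() from the
    # volume's volume_image_metadata, e.g. {'properties': {...}, ...}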
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 484e42da1d..a38809b0da 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -125,9 +125,10 @@ CAPABILITY_TRAITS_MAP = {
"supports_image_type_vmdk": os_traits.COMPUTE_IMAGE_TYPE_VMDK,
# Added in os-traits 2.0.0
"supports_image_type_ploop": os_traits.COMPUTE_IMAGE_TYPE_PLOOP,
-
# Added in os-traits 2.1.0.
"supports_migrate_to_same_host": os_traits.COMPUTE_SAME_HOST_COLD_MIGRATE,
+ # Added in os-traits 2.2.0.
+ "supports_bfv_rescue": os_traits.COMPUTE_RESCUE_BFV,
}
@@ -178,6 +179,7 @@ class ComputeDriver(object):
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
+ "supports_bfv_rescue": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -730,7 +732,7 @@ class ComputeDriver(object):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
+ allocations, block_device_info=None, power_on=True):
"""Completes a resize/migration.
:param context: the context for the migration/resize
@@ -742,6 +744,9 @@ class ComputeDriver(object):
The metadata of the image of the instance.
:param resize_instance: True if the instance is being resized,
False otherwise
+ :param allocations: Information about resources allocated to the
+ instance via placement, of the form returned by
+ SchedulerReportClient.get_allocs_for_consumer.
:param block_device_info: instance volume block device info
:param power_on: True if the instance should be powered on, False
otherwise
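
For orientation, the new ``allocations`` argument carries the placement allocations for the instance. Its shape, as returned by ``SchedulerReportClient.get_allocs_for_consumer``, is roughly as follows; the values here are illustrative placeholders, not real data:

    allocations = {
        'allocations': {
            '<compute-node-rp-uuid>': {
                'resources': {'VCPU': 2, 'MEMORY_MB': 4096, 'DISK_GB': 20},
            },
        },
        'project_id': '<project-id>',
        'user_id': '<user-id>',
        'consumer_generation': 1,
    }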
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 2c668af875..c868d0bd99 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -601,8 +601,8 @@ class FakeDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
- injected_files = admin_password = allocations = None
+ allocations, block_device_info=None, power_on=True):
+ injected_files = admin_password = None
# Finish migration is just like spawning the guest on a destination
# host during resize/cold migrate, so re-use the spawn() fake to
# claim resources and track the instance on this "hypervisor".
@@ -711,6 +711,10 @@ class SameHostColdMigrateDriver(MediumFakeDriver):
supports_migrate_to_same_host=True)
+class RescueBFVDriver(MediumFakeDriver):
+ capabilities = dict(FakeDriver.capabilities, supports_bfv_rescue=True)
+
+
class PowerUpdateFakeDriver(SmallFakeDriver):
# A specific fake driver for the power-update external event testing.
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 174035ebb5..12a6a3f39b 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -2259,3 +2259,10 @@ def get_vpmems(flavor):
if formed_label:
formed_labels.append(formed_label)
return formed_labels
+
+
+def check_hw_rescue_props(image_meta):
+ """Confirm that hw_rescue_* image properties are present.
+ """
+ hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus']
+ return any(key in image_meta.properties for key in hw_rescue_props)
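
A minimal sketch of the helper in use, assuming the ``hw_rescue_device`` and ``hw_rescue_bus`` properties are registered in ImageMetaProps elsewhere in this change:

    from nova import objects
    from nova.virt import hardware

    rescue_meta = objects.ImageMeta.from_dict(
        {'properties': {'hw_rescue_device': 'cdrom',
                        'hw_rescue_bus': 'scsi'}})
    hardware.check_hw_rescue_props(rescue_meta)   # True

    plain_meta = objects.ImageMeta.from_dict({'properties': {}})
    hardware.check_hw_rescue_props(plain_meta)    # False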
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 2d34aa9e07..dee48c617c 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -326,7 +326,7 @@ class HyperVDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
+ allocations, block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 800ebca3c7..5358f3766a 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -39,30 +39,22 @@ CONF = nova.conf.CONF
IMAGE_API = glance.API()
-def qemu_img_info(path, format=None, output_format=None):
+def qemu_img_info(path, format=None):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path) and not path.startswith('rbd:'):
raise exception.DiskNotFound(location=path)
- info = nova.privsep.qemu.unprivileged_qemu_img_info(
- path, format=format, output_format=output_format)
- if output_format:
- return imageutils.QemuImgInfo(info, format=output_format)
- else:
- return imageutils.QemuImgInfo(info)
+ info = nova.privsep.qemu.unprivileged_qemu_img_info(path, format=format)
+ return imageutils.QemuImgInfo(info, format='json')
-def privileged_qemu_img_info(path, format=None, output_format=None):
+def privileged_qemu_img_info(path, format=None, output_format='json'):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path) and not path.startswith('rbd:'):
raise exception.DiskNotFound(location=path)
- info = nova.privsep.qemu.privileged_qemu_img_info(
- path, format=format, output_format=output_format)
- if output_format:
- return imageutils.QemuImgInfo(info, format=output_format)
- else:
- return imageutils.QemuImgInfo(info)
+ info = nova.privsep.qemu.privileged_qemu_img_info(path, format=format)
+ return imageutils.QemuImgInfo(info, format='json')
def convert_image(source, dest, in_format, out_format, run_as_root=False,
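
For reference, with the output format now pinned to JSON, oslo's ``QemuImgInfo`` parses the structured qemu-img payload; a small illustration with an abridged sample payload:

    from oslo_utils import imageutils

    # Sample `qemu-img info --output=json` payload (abridged).
    sample = '''{
        "virtual-size": 1073741824,
        "filename": "disk.qcow2",
        "format": "qcow2",
        "actual-size": 197120
    }'''
    info = imageutils.QemuImgInfo(sample, format='json')
    print(info.file_format)    # qcow2
    print(info.virtual_size)   # 1073741824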
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index b18c8ada63..44debbb90f 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -87,9 +87,17 @@ from nova.virt import osinfo
CONF = cfg.CONF
-
-SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')
BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'}
+# NOTE(aspiers): If you change this, don't forget to update the docs and
+# metadata for hw_*_bus in glance.
+SUPPORTED_DEVICE_BUSES = {
+ 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
+ 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
+ 'xen': ['xen', 'ide'],
+ 'uml': ['uml'],
+ 'lxc': ['lxc'],
+ 'parallels': ['ide', 'scsi']}
+SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')
def has_disk_dev(mapping, disk_dev):
@@ -200,24 +208,10 @@ def find_disk_dev_for_disk_bus(mapping, bus,
raise exception.TooManyDiskDevices(maximum=max_dev)
-# NOTE(aspiers): If you change this, don't forget to update the docs and
-# metadata for hw_*_bus in glance. In addition, these bus names map directly to
-# standard os-traits as 'foo' => 'COMPUTE_STORAGE_BUS_FOO'. If adding a new bus
-# name, make sure the standard trait conforms to this rule.
-SUPPORTED_STORAGE_BUSES = {
- 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
- 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
- 'xen': ['xen', 'ide'],
- 'uml': ['uml'],
- 'lxc': ['lxc'],
- 'parallels': ['ide', 'scsi']
-}
-
-
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
- if virt_type not in SUPPORTED_STORAGE_BUSES:
+ if virt_type not in SUPPORTED_DEVICE_BUSES:
raise exception.UnsupportedVirtType(virt=virt_type)
- return disk_bus in SUPPORTED_STORAGE_BUSES[virt_type]
+ return disk_bus in SUPPORTED_DEVICE_BUSES[virt_type]
def get_disk_bus_for_device_type(instance,
@@ -511,11 +505,9 @@ def update_bdm(bdm, info):
info['bus'], info['type']))))
-def get_disk_mapping(virt_type, instance,
- disk_bus, cdrom_bus,
- image_meta,
- block_device_info=None,
- rescue=False):
+def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
+ block_device_info=None, rescue=False,
+ rescue_image_meta=None):
"""Determine how to map default disks to the virtual machine.
This is about figuring out whether the default 'disk',
@@ -524,32 +516,76 @@ def get_disk_mapping(virt_type, instance,
Returns the guest disk mapping for the devices.
"""
+ # NOTE(lyarwood): This is a legacy rescue attempt so provide a mapping with
+ # the rescue disk, original root disk and optional config drive.
+ if rescue and rescue_image_meta is None:
+ return _get_rescue_disk_mapping(
+ virt_type, instance, disk_bus, image_meta)
+
+ # NOTE(lyarwood): This is a new stable rescue attempt so provide a mapping
+ # with the original mapping *and* rescue disk appended to the end.
+ if rescue and rescue_image_meta:
+ return _get_stable_device_rescue_mapping(
+ virt_type, instance, disk_bus, cdrom_bus, image_meta,
+ block_device_info, rescue_image_meta)
+
+ # NOTE(lyarwood): This is a normal spawn so fetch the full disk mapping.
+ return _get_disk_mapping(
+ virt_type, instance, disk_bus, cdrom_bus, image_meta,
+ block_device_info)
+
+
+def _get_rescue_disk_mapping(virt_type, instance, disk_bus, image_meta):
+ """Build disk mapping for a legacy instance rescue
+
+ This legacy method of rescue requires that the rescue device is attached
+ first, ahead of the original root disk and optional config drive.
+
+ :param virt_type: Virt type used by libvirt.
+ :param instance: nova.objects.instance.Instance object
+ :param disk_bus: Disk bus to use within the mapping
+ :param image_meta: objects.image_meta.ImageMeta for the instance
+ :returns: Disk mapping for the given instance
+ """
mapping = {}
+ rescue_info = get_next_disk_info(mapping,
+ disk_bus, boot_index=1)
+ mapping['disk.rescue'] = rescue_info
+ mapping['root'] = rescue_info
+
+ os_info = get_next_disk_info(mapping,
+ disk_bus)
+ mapping['disk'] = os_info
+
+ if configdrive.required_by(instance):
+ device_type = get_config_drive_type()
+ disk_bus = get_disk_bus_for_device_type(instance,
+ virt_type,
+ image_meta,
+ device_type)
+ config_info = get_next_disk_info(mapping,
+ disk_bus,
+ device_type)
+ mapping['disk.config.rescue'] = config_info
+
+ return mapping
+
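
For orientation, a legacy rescue of a virtio-based instance yields a mapping roughly like the following; the exact keys come from ``get_next_disk_info`` and are shown here as an assumption, not verbatim output:

    {
        'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
                        'type': 'disk', 'boot_index': '1'},
        'root': {'bus': 'virtio', 'dev': 'vda',
                 'type': 'disk', 'boot_index': '1'},
        'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
    }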
- if rescue:
- rescue_info = get_next_disk_info(mapping,
- disk_bus, boot_index=1)
- mapping['disk.rescue'] = rescue_info
- mapping['root'] = rescue_info
-
- os_info = get_next_disk_info(mapping,
- disk_bus)
- mapping['disk'] = os_info
-
- if configdrive.required_by(instance):
- device_type = get_config_drive_type()
- disk_bus = get_disk_bus_for_device_type(instance,
- virt_type,
- image_meta,
- device_type)
- config_info = get_next_disk_info(mapping,
- disk_bus,
- device_type)
- mapping['disk.config.rescue'] = config_info
-
- return mapping
+def _get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
+ block_device_info):
+ """Build disk mapping for a given instance
+ :param virt_type: Virt type used by libvirt.
+ :param instance: nova.objects.instance.Instance object
+ :param disk_bus: Disk bus to use within the mapping
+ :param cdrom_bus: CD-ROM bus to use within the mapping
+ :param image_meta: objects.image_meta.ImageMeta for the instance
+ :param block_device_info: dict detailing disks and volumes attached
+
+ :returns: Disk mapping for the given instance.
+ """
+ mapping = {}
pre_assigned_device_names = \
[block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
driver.block_device_info_get_ephemerals(block_device_info),
@@ -632,12 +668,40 @@ def get_disk_mapping(virt_type, instance,
disk_bus,
device_type)
mapping['disk.config'] = config_info
+ return mapping
+
+
+def _get_stable_device_rescue_mapping(virt_type, instance, disk_bus, cdrom_bus,
+ image_meta, block_device_info, rescue_image_meta):
+ """Build a disk mapping for a given instance and add a rescue device
+
+ This method builds the original disk mapping of the instance and then
+ appends the rescue device as the last device.
+ :param virt_type: Virt type used by libvirt.
+ :param instance: nova.objects.instance.Instance object
+ :param disk_bus: Disk bus to use within the mapping
+ :param cdrom_bus: CD-ROM bus to use within the mapping
+ :param image_meta: objects.image_meta.ImageMeta for the instance
+ :param block_device_info: dict detailing disks and volumes attached
+ :param rescue_image_meta: objects.image_meta.ImageMeta of the rescue image
+
+ :returns: Disk mapping dict with rescue device added.
+ """
+ mapping = _get_disk_mapping(
+ virt_type, instance, disk_bus, cdrom_bus, image_meta,
+ block_device_info)
+ rescue_device = get_rescue_device(rescue_image_meta)
+ rescue_bus = get_rescue_bus(instance, virt_type, rescue_image_meta,
+ rescue_device)
+ rescue_info = get_next_disk_info(mapping, rescue_bus,
+ device_type=rescue_device)
+ mapping['disk.rescue'] = rescue_info
return mapping
-def get_disk_info(virt_type, instance, image_meta,
- block_device_info=None, rescue=False):
+def get_disk_info(virt_type, instance, image_meta, block_device_info=None,
+ rescue=False, rescue_image_meta=None):
"""Determine guest disk mapping info.
This is a wrapper around get_disk_mapping, which
@@ -658,8 +722,9 @@ def get_disk_info(virt_type, instance, image_meta,
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
image_meta,
- block_device_info,
- rescue)
+ block_device_info=block_device_info,
+ rescue=rescue,
+ rescue_image_meta=rescue_image_meta)
return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
@@ -678,3 +743,42 @@ def get_boot_order(disk_info):
return [el for el in lst if el not in s and not s.add(el)]
return uniq(boot_devs_dup)
+
+
+def get_rescue_device(rescue_image_meta):
+ """Find and validate the rescue device
+
+ :param rescue_image_meta: ImageMeta object provided when rescuing
+
+ :raises: UnsupportedRescueDevice if the requested device type is not
+ supported
+ :returns: A valid device type to be used during the rescue
+ """
+ rescue_device = rescue_image_meta.properties.get("hw_rescue_device",
+ "disk")
+ if rescue_device not in SUPPORTED_DEVICE_TYPES:
+ raise exception.UnsupportedRescueDevice(device=rescue_device)
+ return rescue_device
+
+
+def get_rescue_bus(instance, virt_type, rescue_image_meta, rescue_device):
+ """Find and validate the rescue bus
+
+ :param instance: The instance to be rescued
+ :param virt_type: The hypervisor the instance will run on
+ :param rescue_image_meta: ImageMeta object provided when rescuing
+ :param rescue_device: The rescue device being used
+
+ :raises: UnsupportedRescueBus if the requested bus is not
+ supported by the hypervisor
+ :returns: A valid device bus given virt_type and rescue device
+ """
+ rescue_bus = rescue_image_meta.properties.get("hw_rescue_bus")
+ if rescue_bus is not None:
+ if is_disk_bus_valid_for_virt(virt_type, rescue_bus):
+ return rescue_bus
+ else:
+ raise exception.UnsupportedRescueBus(bus=rescue_bus,
+ virt=virt_type)
+ return get_disk_bus_for_device_type(instance, virt_type, rescue_image_meta,
+ device_type=rescue_device)
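
A hedged sketch of the two helpers together; ``instance`` is assumed to be an existing Instance object:

    from nova import objects
    from nova.virt.libvirt import blockinfo

    rescue_meta = objects.ImageMeta.from_dict(
        {'properties': {'hw_rescue_device': 'cdrom',
                        'hw_rescue_bus': 'scsi'}})
    device = blockinfo.get_rescue_device(rescue_meta)   # 'cdrom'
    bus = blockinfo.get_rescue_bus(
        instance, 'kvm', rescue_meta, device)           # 'scsi'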
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index c5e9fbb58e..7b14dc2b95 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -333,6 +333,7 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_image_type_ploop": requires_ploop_image,
"supports_pcpus": True,
"supports_accelerators": True,
+ "supports_bfv_rescue": True,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -1291,8 +1292,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
hw_firmware_type = instance.image_meta.properties.get(
'hw_firmware_type')
- support_uefi = (self._has_uefi_support() and
- hw_firmware_type == fields.FirmwareType.UEFI)
+ support_uefi = self._check_uefi_support(hw_firmware_type)
guest.delete_configuration(support_uefi)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception() as ctxt:
@@ -1422,16 +1422,16 @@ class LibvirtDriver(driver.ComputeDriver):
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
- def _get_scsi_controller_max_unit(self, guest):
+ def _get_scsi_controller_next_unit(self, guest):
"""Returns the max disk unit used by scsi controller"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
- addrs = "./devices/disk[@device='disk']/address[@type='drive']"
+ addrs = "./devices/disk[target/@bus='scsi']/address[@type='drive']"
ret = []
- for obj in tree.findall(addrs):
+ for obj in tree.xpath(addrs):
ret.append(int(obj.get('unit', 0)))
- return max(ret)
+ return max(ret) + 1 if ret else 0
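
The child-attribute predicate ``target/@bus='scsi'`` needs full XPath support, hence the switch from ``findall()`` to lxml's ``xpath()``. A small illustration, showing that a SCSI cdrom is now counted and that an empty controller yields unit 0:

    from lxml import etree

    xml = """<domain><devices>
      <disk device='disk'>
        <target bus='scsi' dev='sda'/><address type='drive' unit='0'/>
      </disk>
      <disk device='cdrom'>
        <target bus='scsi' dev='sdb'/><address type='drive' unit='1'/>
      </disk>
      <disk device='disk'>
        <target bus='virtio' dev='vda'/><address type='pci'/>
      </disk>
    </devices></domain>"""
    tree = etree.fromstring(xml)
    units = [int(a.get('unit', 0)) for a in tree.xpath(
        "./devices/disk[target/@bus='scsi']/address[@type='drive']")]
    next_unit = max(units) + 1 if units else 0   # 2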
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
@@ -1591,9 +1591,16 @@ class LibvirtDriver(driver.ComputeDriver):
return vol_driver.extend_volume(connection_info, instance,
requested_size)
- def _is_luks_v1(self, encryption=None):
- """Check if LUKS (v1) is the encryption 'provider'
+ def _allow_native_luksv1(self, encryption=None):
+ """Check if QEMU's native LUKSv1 decryption should be used.
"""
+ # NOTE(lyarwood): Native LUKSv1 decryption can be disabled via a
+ # workarounds configurable in order to aviod known performance issues
+ # with the libgcrypt lib.
+ if CONF.workarounds.disable_native_luksv1:
+ return False
+
+ # NOTE(lyarwood): Ensure the LUKSv1 provider is used.
provider = None
if encryption:
provider = encryption.get('provider', None)
@@ -1635,7 +1642,7 @@ class LibvirtDriver(driver.ComputeDriver):
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
- if encryption and self._is_luks_v1(encryption=encryption):
+ if encryption and self._allow_native_luksv1(encryption=encryption):
# NOTE(lyarwood): Fetch the associated key for the volume and
# decode the passphrase from the key.
# FIXME(lyarwood): c-vol currently creates symmetric keys for use
@@ -1690,7 +1697,7 @@ class LibvirtDriver(driver.ComputeDriver):
# and device_path is not present in the connection_info. This avoids
# VolumeEncryptionNotSupported being thrown when we incorrectly build
# the encryptor below due to the secrets not being present above.
- if (encryption and self._is_luks_v1(encryption=encryption) and
+ if (encryption and self._allow_native_luksv1(encryption=encryption) and
not connection_info['data'].get('device_path')):
return
if encryption:
@@ -1745,7 +1752,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
if disk_info['bus'] == 'scsi':
- disk_info['unit'] = self._get_scsi_controller_max_unit(guest) + 1
+ disk_info['unit'] = self._get_scsi_controller_next_unit(guest)
conf = self._get_volume_config(connection_info, disk_info)
@@ -1821,8 +1828,7 @@ class LibvirtDriver(driver.ComputeDriver):
# undefine it. If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
- support_uefi = (self._has_uefi_support() and
- hw_firmware_type == fields.FirmwareType.UEFI)
+ support_uefi = self._check_uefi_support(hw_firmware_type)
guest.delete_configuration(support_uefi)
try:
@@ -1877,8 +1883,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(lyarwood): https://bugzilla.redhat.com/show_bug.cgi?id=760547
old_encrypt = self._get_volume_encryption(context, old_connection_info)
new_encrypt = self._get_volume_encryption(context, new_connection_info)
- if ((old_encrypt and self._is_luks_v1(old_encrypt)) or
- (new_encrypt and self._is_luks_v1(new_encrypt))):
+ if ((old_encrypt and self._allow_native_luksv1(old_encrypt)) or
+ (new_encrypt and self._allow_native_luksv1(new_encrypt))):
raise NotImplementedError(_("Swap volume is not supported for "
"encrypted volumes when native LUKS decryption is enabled."))
@@ -2000,7 +2006,7 @@ class LibvirtDriver(driver.ComputeDriver):
# volumes we need to ensure this now takes the LUKSv1 header and key
# material into account. Otherwise QEMU will attempt and fail to grow
# host block devices and remote RBD volumes.
- if self._is_luks_v1(encryption):
+ if self._allow_native_luksv1(encryption):
try:
# NOTE(lyarwood): Find the path to provide to qemu-img
if 'device_path' in connection_info['data']:
@@ -2011,8 +2017,7 @@ class LibvirtDriver(driver.ComputeDriver):
path = 'unknown'
raise exception.DiskNotFound(location='unknown')
- info = images.privileged_qemu_img_info(
- path, output_format='json')
+ info = images.privileged_qemu_img_info(path)
format_specific_data = info.format_specific['data']
payload_offset = format_specific_data['payload-offset']
@@ -2586,8 +2591,7 @@ class LibvirtDriver(driver.ComputeDriver):
if guest.has_persistent_configuration():
hw_firmware_type = image_meta.properties.get(
'hw_firmware_type')
- support_uefi = (self._has_uefi_support() and
- hw_firmware_type == fields.FirmwareType.UEFI)
+ support_uefi = self._check_uefi_support(hw_firmware_type)
guest.delete_configuration(support_uefi)
# NOTE (rmk): Establish a temporary mirror of our root disk and
@@ -2992,21 +2996,7 @@ class LibvirtDriver(driver.ComputeDriver):
rebase_base = _get_snap_dev(rebase_base,
active_disk_object.backing_store)
- # NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
- # and when available this flag _must_ be used to ensure backing
- # paths are maintained relative by qemu.
- #
- # If _RELATIVE flag not found, continue with old behaviour
- # (relative backing path seems to work for this case)
- try:
- libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
- relative = rebase_base is not None
- except AttributeError:
- LOG.warning(
- "Relative blockrebase support was not detected. "
- "Continuing with old behaviour.")
- relative = False
-
+ relative = rebase_base is not None
LOG.debug(
'disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, relative: %(relative)s',
@@ -3417,6 +3407,29 @@ class LibvirtDriver(driver.ComputeDriver):
should not edit or over-ride the original image, only allow for
data recovery.
+ Two modes are provided when rescuing an instance with this driver.
+
+ In the original and default rescue mode, the rescue boot disk,
+ original root disk and optional regenerated config drive are attached
+ to the instance.
+
+ A second, stable device rescue mode is also provided, where all of the
+ original devices are attached to the instance during the rescue attempt
+ along with the rescue boot disk. This second mode is
+ controlled by the hw_rescue_device and hw_rescue_bus image properties
+ on the rescue image provided to this method via image_meta.
+
+ :param nova.context.RequestContext context:
+ The context for the rescue.
+ :param nova.objects.instance.Instance instance:
+ The instance being rescued.
+ :param nova.network.model.NetworkInfo network_info:
+ Necessary network information for the rescue.
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
+ :param rescue_password: new root password to set for rescue.
+ :param dict block_device_info:
+ The block device mapping of the instance.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
@@ -3424,6 +3437,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_image_id = None
+ rescue_image_meta = None
if image_meta.obj_attr_is_set("id"):
rescue_image_id = image_meta.id
@@ -3435,10 +3449,50 @@ class LibvirtDriver(driver.ComputeDriver):
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta,
- rescue=True)
+
+ virt_type = CONF.libvirt.virt_type
+ if hardware.check_hw_rescue_props(image_meta):
+ LOG.info("Attempting a stable device rescue", instance=instance)
+ # NOTE(lyarwood): Stable device rescue is not supported when using
+ # the LXC and Xen virt_types as they do not support the required
+ # <boot order=''> definitions allowing an instance to boot from the
+ # rescue device added as a final device to the domain.
+ if virt_type in ('lxc', 'xen'):
+ reason = ("Stable device rescue is not supported by virt_type "
+ "%s", virt_type)
+ raise exception.InstanceNotRescuable(instance_id=instance.uuid,
+ reason=reason)
+ # NOTE(lyarwood): Stable device rescue provides the original disk
+ # mapping of the instance with the rescue device appended to the
+ # end. As a result we need to provide the original image_meta, the
+ # new rescue_image_meta and block_device_info when calling
+ # get_disk_info.
+ rescue_image_meta = image_meta
+ if instance.image_ref:
+ image_meta = objects.ImageMeta.from_image_ref(
+ context, self._image_api, instance.image_ref)
+ else:
+ # NOTE(lyarwood): If instance.image_ref isn't set attempt to
+ # lookup the original image_meta from the bdms. This will
+ # return an empty dict if no valid image_meta is found.
+ image_meta_dict = utils.get_bdm_image_metadata(
+ context, self._image_api, self._volume_api,
+ block_device_info['block_device_mapping'],
+ legacy_bdm=False)
+ image_meta = objects.ImageMeta.from_dict(image_meta_dict)
+
+ else:
+ LOG.info("Attempting rescue", instance=instance)
+ # NOTE(lyarwood): A legacy rescue only provides the rescue device
+ # and the original root device so we don't need to provide
+ # block_device_info to the get_disk_info call.
+ block_device_info = None
+
+ disk_info = blockinfo.get_disk_info(virt_type, instance, image_meta,
+ rescue=True, block_device_info=block_device_info,
+ rescue_image_meta=rescue_image_meta)
+ LOG.debug("rescue generated disk_info: %s", disk_info)
+
injection_info = InjectionInfo(network_info=network_info,
admin_pass=rescue_password,
files=None)
@@ -3454,7 +3508,8 @@ class LibvirtDriver(driver.ComputeDriver):
disk_images=rescue_images)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
- mdevs=mdevs)
+ mdevs=mdevs,
+ block_device_info=block_device_info)
self._destroy(instance)
self._create_domain(xml, post_xml_callback=gen_confdrive)
@@ -4407,7 +4462,7 @@ class LibvirtDriver(driver.ComputeDriver):
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
- image_type=None):
+ image_type=None, boot_order=None):
disk_unit = None
disk = self.image_backend.by_name(instance, name, image_type)
if (name == 'disk.config' and image_type == 'rbd' and
@@ -4430,7 +4485,8 @@ class LibvirtDriver(driver.ComputeDriver):
conf = disk.libvirt_info(disk_info, self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version(),
- disk_unit=disk_unit)
+ disk_unit=disk_unit,
+ boot_order=boot_order)
return conf
def _get_guest_fs_config(self, instance, name, image_type=None):
@@ -4502,7 +4558,7 @@ class LibvirtDriver(driver.ComputeDriver):
devices = devices + _get_ephemeral_devices()
else:
- if rescue:
+ if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
@@ -4542,7 +4598,10 @@ class LibvirtDriver(driver.ComputeDriver):
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
- config_name = 'disk.config.rescue' if rescue else 'disk.config'
+ config_name = 'disk.config'
+ if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']:
+ config_name = 'disk.config.rescue'
+
if config_name in disk_mapping:
diskconfig = self._get_guest_disk_config(
instance, config_name, disk_mapping, inst_type,
@@ -4575,6 +4634,12 @@ class LibvirtDriver(driver.ComputeDriver):
if scsi_controller:
devices.append(scsi_controller)
+ if rescue and disk_mapping['disk.rescue'] != disk_mapping['root']:
+ diskrescue = self._get_guest_disk_config(instance, 'disk.rescue',
+ disk_mapping, inst_type,
+ boot_order='1')
+ devices.append(diskrescue)
+
return devices
@staticmethod
@@ -5383,6 +5448,12 @@ class LibvirtDriver(driver.ComputeDriver):
any((os.path.exists(p)
for p in DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch])))
+ def _check_uefi_support(self, hw_firmware_type):
+ caps = self._host.get_capabilities()
+ return (self._has_uefi_support() and
+ (hw_firmware_type == fields.FirmwareType.UEFI or
+ caps.host.cpu.arch == fields.Architecture.AARCH64))
+
def _get_supported_perf_events(self):
if (len(CONF.libvirt.enabled_perf_events) == 0):
@@ -6562,7 +6633,7 @@ class LibvirtDriver(driver.ComputeDriver):
# .format() can return IndexError
except (exception.PciDeviceWrongAddressFormat, IndexError):
# this is not a valid PCI address
- LOG.warning("The PCI address %s was invalid for getting the"
+ LOG.warning("The PCI address %s was invalid for getting the "
"related vGPU type", device_address)
return
try:
@@ -10039,7 +10110,7 @@ class LibvirtDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
+ allocations, block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@@ -10108,9 +10179,13 @@ class LibvirtDriver(driver.ComputeDriver):
info['type'] == 'raw' and CONF.use_cow_images):
self._disk_raw_to_qcow2(info['path'])
+ # Does the guest need to be assigned some vGPU mediated devices?
+ mdevs = self._allocate_mdevs(allocations)
+
xml = self._get_guest_xml(context, instance, network_info,
block_disk_info, image_meta,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ mdevs=mdevs)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
@@ -10170,9 +10245,15 @@ class LibvirtDriver(driver.ComputeDriver):
instance,
instance.image_meta,
block_device_info)
+
+ # The guest could already have mediated devices; if so, use them
+ # for the new XML.
+ mdevs = list(self._get_all_assigned_mediated_devices(instance))
+
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ mdevs=mdevs)
# NOTE(artom) In some Neutron or port configurations we've already
# waited for vif-plugged events in the compute manager's
# _finish_revert_resize_network_migrate_finish(), right after updating
@@ -10686,7 +10767,7 @@ class LibvirtDriver(driver.ComputeDriver):
:return: A dict of trait names mapped to boolean values.
"""
all_buses = set(itertools.chain(
- *blockinfo.SUPPORTED_STORAGE_BUSES.values()
+ *blockinfo.SUPPORTED_DEVICE_BUSES.values()
))
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
@@ -10698,7 +10779,7 @@ class LibvirtDriver(driver.ComputeDriver):
dom_caps[arch_type][machine_type].devices.disk.buses
)
else:
- supported_buses = blockinfo.SUPPORTED_STORAGE_BUSES.get(
+ supported_buses = blockinfo.SUPPORTED_DEVICE_BUSES.get(
CONF.libvirt.virt_type, []
)
@@ -10832,7 +10913,7 @@ class LibvirtDriver(driver.ComputeDriver):
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
ctxt.reraise = False
- LOG.info('URI %(uri)s does not support full set'
- ' of host capabilities: %(error)s',
- {'uri': self._host._uri, 'error': ex})
+ LOG.debug('URI %(uri)s does not support full set'
+ ' of host capabilities: %(error)s',
+ {'uri': self._host._uri, 'error': ex})
return None
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 00c387cd8c..2bd695d484 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -294,23 +294,6 @@ class LibvirtGenericVIFDriver(object):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
- def get_config_bridge(self, instance, vif, image_meta,
- inst_type, virt_type, host):
- """Get VIF configurations for bridge type."""
- conf = self.get_base_config(instance, vif['address'], image_meta,
- inst_type, virt_type, vif['vnic_type'],
- host)
-
- designer.set_vif_host_backend_bridge_config(
- conf, self.get_bridge_name(vif),
- self.get_vif_devname(vif))
-
- designer.set_vif_bandwidth_config(conf, inst_type)
-
- self._set_mtu_config(vif, host, conf)
-
- return conf
-
def _set_mtu_config(self, vif, host, conf):
""":param vif: nova.network.modle.vif
:param host: nova.virt.libvirt.host.Host
@@ -613,8 +596,6 @@ class LibvirtGenericVIFDriver(object):
args = (instance, vif, image_meta, inst_type, virt_type, host)
if vif_type == network_model.VIF_TYPE_IOVISOR:
return self.get_config_iovisor(*args)
- elif vif_type == network_model.VIF_TYPE_BRIDGE:
- return self.get_config_bridge(*args)
elif vif_type == network_model.VIF_TYPE_802_QBG:
return self.get_config_802qbg(*args)
elif vif_type == network_model.VIF_TYPE_802_QBH:
diff --git a/nova/virt/libvirt/volume/net.py b/nova/virt/libvirt/volume/net.py
index 1164395a4b..ef065c8f5a 100644
--- a/nova/virt/libvirt/volume/net.py
+++ b/nova/virt/libvirt/volume/net.py
@@ -10,9 +10,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from os_brick import exception as os_brick_exception
+from os_brick import initiator
+from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
+from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
@@ -25,6 +29,10 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
def __init__(self, host):
super(LibvirtNetVolumeDriver,
self).__init__(host, is_block_dev=False)
+ self.connector = None
+ if CONF.workarounds.rbd_volume_local_attach:
+ self.connector = connector.InitiatorConnector.factory(
+ initiator.RBD, utils.get_root_helper(), do_local_attach=True)
def _set_auth_config_rbd(self, conf, netdisk_properties):
# The rbd volume driver in cinder sets auth_enabled if the rbd_user is
@@ -67,11 +75,37 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# secret_type is always hard-coded to 'ceph' in cinder
conf.auth_secret_type = netdisk_properties['secret_type']
- def get_config(self, connection_info, disk_info):
- """Returns xml for libvirt."""
- conf = super(LibvirtNetVolumeDriver,
- self).get_config(connection_info, disk_info)
+ def _use_rbd_volume_local_attach(self, connection_info):
+ return (connection_info['driver_volume_type'] == 'rbd' and
+ CONF.workarounds.rbd_volume_local_attach)
+ def connect_volume(self, connection_info, instance):
+ if self._use_rbd_volume_local_attach(connection_info):
+ LOG.debug("Calling os-brick to attach RBD Volume as block device",
+ instance=instance)
+ device_info = self.connector.connect_volume(
+ connection_info['data'])
+ LOG.debug("Attached RBD volume %s", device_info, instance=instance)
+ connection_info['data']['device_path'] = device_info['path']
+
+ def disconnect_volume(self, connection_info, instance):
+ if self._use_rbd_volume_local_attach(connection_info):
+ LOG.debug("calling os-brick to detach RBD Volume",
+ instance=instance)
+ try:
+ self.connector.disconnect_volume(connection_info['data'], None)
+ except os_brick_exception.VolumeDeviceNotFound as exc:
+ LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
+ return
+ LOG.debug("Disconnected RBD Volume", instance=instance)
+
+ def _get_block_config(self, conf, connection_info):
+ conf.source_type = "block"
+ conf.source_path = connection_info['data']['device_path']
+ conf.driver_io = "native"
+ return conf
+
+ def _get_net_config(self, conf, connection_info):
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
@@ -82,8 +116,18 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
self._set_auth_config_rbd(conf, netdisk_properties)
return conf
+ def get_config(self, connection_info, disk_info):
+ """Returns xml for libvirt."""
+ conf = super(LibvirtNetVolumeDriver,
+ self).get_config(connection_info, disk_info)
+ if self._use_rbd_volume_local_attach(connection_info):
+ return self._get_block_config(conf, connection_info)
+ return self._get_net_config(conf, connection_info)
+
def extend_volume(self, connection_info, instance, requested_size):
- # There is nothing to do for network volumes. Cinder already extended
- # the volume and there is no local block device which needs to be
- # refreshed.
+ if self._use_rbd_volume_local_attach(connection_info):
+ raise NotImplementedError
+ # There is nothing to do for network volumes. Cinder already
+ # extended the volume and there is no local block device which
+ # needs to be refreshed.
return requested_size
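
A hedged sketch of the data this driver now acts on when the workaround is enabled; field names follow the usual os-brick/cinder RBD connection_info shape, and all values here are illustrative:

    # After connect_volume() has run with the workaround enabled,
    # os-brick has mapped the image to a local /dev/rbdX device and
    # recorded its path in the connection_info.
    connection_info = {
        'driver_volume_type': 'rbd',
        'data': {
            'name': 'volumes/volume-<uuid>',
            'hosts': ['192.0.2.10'],
            'ports': ['6789'],
            'device_path': '/dev/rbd0',  # set by connect_volume()
        },
    }
    # get_config() then emits a <disk type='block'> element pointing at
    # /dev/rbd0 instead of <disk type='network' protocol='rbd'>.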
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 4c565ba960..251d8bfe4e 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -68,6 +68,7 @@ class PowerVMDriver(driver.ComputeDriver):
# capabilities on the instance rather than on the class.
self.capabilities = {
'has_imagecache': False,
+ 'supports_bfv_rescue': False,
'supports_evacuate': False,
'supports_migrate_to_same_host': False,
'supports_attach_interface': True,
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 1509e7e147..852c3d33a5 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -287,7 +287,7 @@ class VMwareVCDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
+ allocations, block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index afa2583436..5b9cde85de 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -238,7 +238,7 @@ class XenAPIDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- block_device_info=None, power_on=True):
+ allocations, block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index d139b9f480..2c6c936f0d 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -24,6 +24,7 @@ import functools
import sys
from cinderclient import api_versions as cinder_api_versions
+from cinderclient import apiclient as cinder_apiclient
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from keystoneauth1 import exceptions as keystone_exception
@@ -33,6 +34,7 @@ from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
+import retrying
import six
from six.moves import urllib
@@ -566,6 +568,9 @@ class API(object):
mountpoint, mode=mode)
@translate_volume_exception
+ @retrying.retry(stop_max_attempt_number=5,
+ retry_on_exception=lambda e:
+ type(e) == cinder_apiclient.exceptions.InternalServerError)
def detach(self, context, volume_id, instance_uuid=None,
attachment_id=None):
client = cinderclient(context)
@@ -629,6 +634,9 @@ class API(object):
exc.code if hasattr(exc, 'code') else None)})
@translate_volume_exception
+ @retrying.retry(stop_max_attempt_number=5,
+ retry_on_exception=lambda e:
+ type(e) == cinder_apiclient.exceptions.InternalServerError)
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
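
For orientation, the retry semantics these decorators add, and which the unit tests above assert, can be sketched with a stand-in exception class; this is a minimal illustration, not nova code:

    import retrying

    class FakeInternalServerError(Exception):
        """Stand-in for cinder_apiclient.exceptions.InternalServerError."""

    calls = []

    @retrying.retry(stop_max_attempt_number=5,
                    retry_on_exception=lambda e:
                        type(e) == FakeInternalServerError)
    def flaky_detach():
        calls.append(1)
        if len(calls) < 2:
            raise FakeInternalServerError()
        return 'detached'

    flaky_detach()
    assert len(calls) == 2  # one failure, one successful retry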
@@ -875,6 +883,9 @@ class API(object):
'code': getattr(ex, 'code', None)})
@translate_attachment_exception
+ @retrying.retry(stop_max_attempt_number=5,
+ retry_on_exception=lambda e:
+ type(e) == cinder_apiclient.exceptions.InternalServerError)
def attachment_delete(self, context, attachment_id):
try:
cinderclient(
diff --git a/releasenotes/notes/bp-policy-defaults-refresh-b8e6e2d6b1a7bc21.yaml b/releasenotes/notes/bp-policy-defaults-refresh-b8e6e2d6b1a7bc21.yaml
new file mode 100644
index 0000000000..b6be3955d7
--- /dev/null
+++ b/releasenotes/notes/bp-policy-defaults-refresh-b8e6e2d6b1a7bc21.yaml
@@ -0,0 +1,137 @@
+---
+features:
+ - |
+ Nova policies now implement the scope concept and the new default
+ roles (``admin``, ``member``, and ``reader``) provided by keystone.
+upgrade:
+ - |
+ All policies except the deprecated APIs policy have been changed to
+ implement ``scope_type`` and the new defaults. The deprecated APIs
+ policy will be moved to ``scope_type`` and the new defaults in the
+ next release.
+
+ Please refer to `Policy New Defaults`_ for details about the new
+ policy defaults and the migration plan.
+
+ * **Scope**
+
+ Each policy is protected with an appropriate ``scope_type``. Nova
+ supports two types of ``scope_type`` and their combination:
+ ``['system']``, ``['project']`` and ``['system', 'project']``.
+
+ To find the ``scope_type`` of each policy, please refer to the
+ `Policy Reference`_.
+
+ This feature is disabled by default and can be enabled via the config
+ option ``[oslo_policy]enforce_scope`` in ``nova.conf``.
+
+ * **New Defaults(Admin, Member and Reader)**
+
+ Policies now default to the Admin, Member and Reader roles. The old
+ roles are also supported. You can switch to the new defaults via the
+ config option ``[oslo_policy]enforce_new_defaults`` in the
+ ``nova.conf`` file.
+
+ * **Policies granularity**
+
+ To implement the reader roles, the below policies are made more granular:
+
+ - ``os_compute_api:os-agents`` is made granular to
+
+ - ``os_compute_api:os-agents:create``
+ - ``os_compute_api:os-agents:update``
+ - ``os_compute_api:os-agents:delete``
+ - ``os_compute_api:os-agents:list``
+
+ - ``os_compute_api:os-attach-interfaces`` is made granular to
+
+ - ``os_compute_api:os-attach-interfaces:create``
+ - ``os_compute_api:os-attach-interfaces:delete``
+ - ``os_compute_api:os-attach-interfaces:show``
+ - ``os_compute_api:os-attach-interfaces:list``
+
+ - ``os_compute_api:os-deferred-delete`` is made granular to
+
+ - ``os_compute_api:os-deferred-delete:restore``
+ - ``os_compute_api:os-deferred-delete:force``
+
+ - ``os_compute_api:os-hypervisors`` is made granular to
+
+ - ``os_compute_api:os-hypervisors:list``
+ - ``os_compute_api:os-hypervisors:list-detail``
+ - ``os_compute_api:os-hypervisors:statistics``
+ - ``os_compute_api:os-hypervisors:show``
+ - ``os_compute_api:os-hypervisors:uptime``
+ - ``os_compute_api:os-hypervisors:search``
+ - ``os_compute_api:os-hypervisors:servers``
+
+ - ``os_compute_api:os-security-groups`` is made granular to
+
+ - ``os_compute_api:os-security-groups:add``
+ - ``os_compute_api:os-security-groups:remove``
+ - ``os_compute_api:os-security-groups:list``
+
+ - ``os_compute_api:os-instance-usage-audit-log`` is made granular to
+
+ - ``os_compute_api:os-instance-usage-audit-log:list``
+ - ``os_compute_api:os-instance-usage-audit-log:show``
+
+ - ``os_compute_api:os-instance-actions`` is made granular to
+
+ - ``os_compute_api:os-instance-actions:list``
+ - ``os_compute_api:os-instance-actions:show``
+
+ - ``os_compute_api:os-server-password`` is made granular to
+
+ - ``os_compute_api:os-server-password:show``
+ - ``os_compute_api:os-server-password:clear``
+
+ - ``os_compute_api:os-rescue`` is made granular to
+
+ - ``os_compute_api:os-rescue``
+ - ``os_compute_api:os-unrescue``
+
+ - ``os_compute_api:os-used-limits`` is renamed to
+
+ - ``os_compute_api:limits:other_project``
+
+ - ``os_compute_api:os-services`` is made granular to
+
+ - ``os_compute_api:os-services:list``
+ - ``os_compute_api:os-services:update``
+ - ``os_compute_api:os-services:delete``
+deprecations:
+ - |
+ As part of the policy defaults refresh, the below policies are
+ deprecated and will be removed in the 23.0.0 release. They are
+ replaced by the new granular policies listed in the features section.
+
+ - ``os_compute_api:os-agents``
+ - ``os_compute_api:os-attach-interfaces``
+ - ``os_compute_api:os-deferred-delete``
+ - ``os_compute_api:os-hypervisors``
+ - ``os_compute_api:os-security-groups``
+ - ``os_compute_api:os-instance-usage-audit-log``
+ - ``os_compute_api:os-instance-actions``
+ - ``os_compute_api:os-server-password``
+ - ``os_compute_api:os-used-limits``
+ - ``os_compute_api:os-services``
+fixes:
+ - |
+ The below bugs in the policy default values are fixed:
+
+ - https://bugs.launchpad.net/nova/+bug/1863009
+ - https://bugs.launchpad.net/nova/+bug/1869396
+ - https://bugs.launchpad.net/nova/+bug/1867840
+ - https://bugs.launchpad.net/nova/+bug/1869791
+ - https://bugs.launchpad.net/nova/+bug/1869841
+ - https://bugs.launchpad.net/nova/+bug/1869543
+ - https://bugs.launchpad.net/nova/+bug/1870883
+ - https://bugs.launchpad.net/nova/+bug/1871287
+ - https://bugs.launchpad.net/nova/+bug/1870488
+ - https://bugs.launchpad.net/nova/+bug/1870872
+ - https://bugs.launchpad.net/nova/+bug/1870484
+ - https://bugs.launchpad.net/nova/+bug/1870881
+ - https://bugs.launchpad.net/nova/+bug/1871665
+ - https://bugs.launchpad.net/nova/+bug/1870226
+
+ .. _policy-defaults-refresh: https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html
+ .. _Policy Reference: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _Policy New Defaults: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
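
Illustratively, the two oslo.policy options referenced in this release note are set in ``nova.conf`` as follows:

    [oslo_policy]
    # Enforce token scope (system- vs project-scoped tokens).
    enforce_scope = True
    # Use the new admin/member/reader defaults instead of the old rules.
    enforce_new_defaults = True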
diff --git a/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml b/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml
index 76c96e755a..0b32b43ee9 100644
--- a/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml
+++ b/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml
@@ -3,4 +3,19 @@ features:
- |
The 2.86 microversion adds support for flavor extra spec validation when
creating or updating flavor extra specs. Use of an unrecognized or invalid
- flavor extra spec will result in a HTTP 400 response.
+ flavor extra spec in the following namespaces will result in a HTTP 400
+ response.
+
+ - ``accel``
+ - ``aggregate_instance_extra_specs``
+ - ``capabilities``
+ - ``hw``
+ - ``hw_rng``
+ - ``hw_video``
+ - ``os``
+ - ``pci_passthrough``
+ - ``powervm``
+ - ``quota``
+ - ``resources`` (including ``_{group}`` suffixes)
+ - ``trait`` (including ``_{group}`` suffixes)
+ - ``vmware``
diff --git a/releasenotes/notes/stable_rescue_bfv-cd0e9f0f7e9eaa25.yaml b/releasenotes/notes/stable_rescue_bfv-cd0e9f0f7e9eaa25.yaml
new file mode 100644
index 0000000000..80aefaa2c0
--- /dev/null
+++ b/releasenotes/notes/stable_rescue_bfv-cd0e9f0f7e9eaa25.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Stable device rescue for boot from volume instances is now supported
+ through the use of the 2.87 microversion when the compute hosting the
+ instance also reports the ``COMPUTE_RESCUE_BFV`` trait such as the libvirt
+ driver.
+
+ No changes have been made to the request or response parameters of the
+ rescue API itself.
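
Illustratively, a stable device rescue of a boot-from-volume server might then be requested as follows, assuming a python-openstackclient new enough to pass the microversion; placeholders are in angle brackets:

    $ openstack --os-compute-api-version 2.87 server rescue \
        --image <rescue-image-uuid> <server-uuid>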
diff --git a/releasenotes/notes/workarounds-libvirt-disable-native-luks-a4eccca8019db243.yaml b/releasenotes/notes/workarounds-libvirt-disable-native-luks-a4eccca8019db243.yaml
new file mode 100644
index 0000000000..fedf88e6f2
--- /dev/null
+++ b/releasenotes/notes/workarounds-libvirt-disable-native-luks-a4eccca8019db243.yaml
@@ -0,0 +1,26 @@
+---
+other:
+ - |
+ The ``[workarounds]/disable_native_luksv1`` configuration option has
+ been introduced. This can be used by operators to work around recently
+ discovered performance issues found within the `libgcrypt library`__ used
+ by QEMU when natively decrypting LUKSv1 encrypted disks. Enabling this
+ option will result in the use of the legacy ``dm-crypt`` based os-brick
+ provided encryptors.
+
+ Operators should be aware that this workaround only applies when using the
+ libvirt compute driver with attached encrypted Cinder volumes using the
+ ``luks`` encryption provider. The ``luks2`` encryption provider will
+ continue to use the ``dm-crypt`` based os-brick encryptors regardless of
+ what this configurable is set to.
+
+ This workaround is temporary and will be removed during the W release once
+ all impacted distributions have been able to update their versions of the
+ libgcrypt library.
+
+ .. warning:: Operators must ensure no instances are running on the compute
+ host before enabling this workaround. Any instances with encrypted LUKSv1
+ disks left running on the hosts will fail to migrate or stop after this
+ workaround has been enabled.
+
+ .. __: https://bugzilla.redhat.com/show_bug.cgi?id=1762765
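
Illustratively, the workaround is enabled in ``nova.conf`` as:

    [workarounds]
    # Fall back to the dm-crypt based os-brick encryptors for LUKSv1 volumes.
    disable_native_luksv1 = True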
diff --git a/releasenotes/notes/workarounds-libvirt-rbd-host-block-devices-ca5e3c187342ab4d.yaml b/releasenotes/notes/workarounds-libvirt-rbd-host-block-devices-ca5e3c187342ab4d.yaml
new file mode 100644
index 0000000000..eb740d0ada
--- /dev/null
+++ b/releasenotes/notes/workarounds-libvirt-rbd-host-block-devices-ca5e3c187342ab4d.yaml
@@ -0,0 +1,23 @@
+---
+other:
+ - |
+ The ``[workarounds]/rbd_volume_local_attach`` configuration option has been
+ introduced. This can be used by operators to ensure RBD volumes are
+ connected to compute hosts as block devices. This can be used with
+ the ``[workarounds]/disable_native_luksv1`` configuration option to
+ work around recently discovered performance issues found within the
+ `libgcrypt library`__ used by QEMU when natively decrypting LUKSv1
+ encrypted disks.
+
+ This workaround does not currently support extending attached volumes.
+
+ This workaround is temporary and will be removed during the W release once
+ all impacted distributions have been able to update their versions of the
+ libgcrypt library.
+
+ .. warning:: Operators must ensure no instances are running on the compute
+ host before enabling this workaround. Any instances with attached RBD
+ volumes left running on the hosts will fail to migrate or stop after this
+ workaround has been enabled.
+
+ .. __: https://bugzilla.redhat.com/show_bug.cgi?id=1762765
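
Illustratively, combining both workarounds in ``nova.conf``:

    [workarounds]
    # Attach RBD volumes to the compute host as local block devices.
    rbd_volume_local_attach = True
    # Fall back to the dm-crypt based os-brick encryptors for LUKSv1 volumes.
    disable_native_luksv1 = True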
diff --git a/requirements.txt b/requirements.txt
index 099f7cabd7..1ab8c54bdb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -45,7 +45,7 @@ oslo.utils>=4.1.0 # Apache-2.0
oslo.db>=4.44.0 # Apache-2.0
oslo.rootwrap>=5.8.0 # Apache-2.0
oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=2.3.0 # Apache-2.0
+oslo.policy>=3.1.0 # Apache-2.0
oslo.privsep>=1.33.2 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
oslo.service>=1.40.1 # Apache-2.0
@@ -53,7 +53,7 @@ rfc3986>=1.1.0 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
psutil>=3.2.2 # BSD
oslo.versionedobjects>=1.35.0 # Apache-2.0
-os-brick>=2.6.2 # Apache-2.0
+os-brick>=3.0.1 # Apache-2.0
os-resource-classes>=0.4.0 # Apache-2.0
os-traits>=2.2.0 # Apache-2.0
os-vif>=1.14.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index fcb083667a..c5119ca798 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,17 +60,22 @@ commands =
description =
Run functional tests using python3.
# As nova functional tests import the PlacementFixture from the placement
-# repository these tests are, by default, set up to run with latest master from
-# the placement repo. In the gate, Zuul will clone the latest master from
-# placement OR the version of placement the Depends-On in the commit message
-# suggests. If you want to run the test locally with an un-merged placement
-# change, modify this line locally to point to your dependency or pip install
-# placement into the appropriate tox virtualenv. We express the requirement
-# here instead of test-requirements because we do not want placement present
-# during unit tests.
+# repository these tests are, by default, set up to run with openstack-placement
+# from pypi. In the gate, Zuul will use the installed version of placement (stable
+# branch version on stable gate run) OR the version of placement the Depends-On in
+# the commit message suggests. If you want to run the tests with latest master from
+# the placement repo, modify the dep line to point at master, example:
+# deps =
+# {[testenv]deps}
+# git+https://opendev.org/openstack/placement#egg=openstack-placement
+# If you want to run the test locally with an un-merged placement change,
+# modify the dep line to point to your dependency or pip install placement
+# into the appropriate tox virtualenv.
+# NOTE: We express the requirement here instead of test-requirements
+# because we do not want placement present during unit tests.
deps =
{[testenv]deps}
- git+https://opendev.org/openstack/placement#egg=openstack-placement
+ openstack-placement>=1.0.0
commands =
# NOTE(cdent): The group_regex describes how stestr will group tests into the
# same process when running concurrently. The following ensures that gabbi tests