author    Julia Kreger <juliaashleykreger@gmail.com>  2023-01-09 15:01:43 -0800
committer Julia Kreger <juliaashleykreger@gmail.com>  2023-01-18 07:59:35 -0800
commit    bad3790e8a877a418666c5d4608f0a1a39acd67f (patch)
tree      4a3f50ce138e44d7dc55ecadc0c17fe933bb841b /ironic
parent    a48af6b5f13598ef83fff6dfd5a01480ed23743d (diff)
Add `service` role RBAC policy support
This change adds support for the ``service`` role, which is intended largely for service-to-service communication: for example, creating a "nova" project with an ironic service user in it and configuring the ``nova-compute`` service with those credentials, or vice versa, an "ironic" project with a nova user. In this case, access is exceptionally similar to the rights afforded to a "project scoped manager" or an "owner-admin".

Change-Id: Ifd098a4567d60c90550afe5236ae2af143b6bac2
Diffstat (limited to 'ironic')
-rw-r--r--  ironic/common/policy.py                               41
-rw-r--r--  ironic/tests/unit/api/test_rbac_project_scoped.yaml  435
-rw-r--r--  ironic/tests/unit/api/test_rbac_system_scoped.yaml   156
3 files changed, 617 insertions, 15 deletions
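
For context on the service-to-service pattern described in the commit message, the sketch below (not part of this change) shows how a deployer might obtain credentials for an "ironic" service user living inside a "nova" project; the endpoint, names and password are all placeholders.

    # Hedged sketch: a service user in the "nova" project, as described above.
    # All values are placeholders, not part of this change.
    from keystoneauth1.identity import v3
    from keystoneauth1.session import Session

    auth = v3.Password(
        auth_url='https://keystone.example.com/v3',  # placeholder endpoint
        username='ironic',                           # service user
        password='example-secret',
        project_name='nova',                         # service project
        user_domain_name='Default',
        project_domain_name='Default',
    )
    session = Session(auth=auth)
    # Tokens issued from this session carry the ``service`` role once an
    # operator assigns that role to the user on the project.

The policy changes below define what such a token is allowed to do.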
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index afce51c77..39733c732 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -57,7 +57,7 @@ SYSTEM_MEMBER = 'role:member and system_scope:all'
# support. These users are also able to view project-specific resources where
# applicable (e.g., listing all volumes in the deployment, regardless of the
# project they belong to).
-SYSTEM_READER = 'role:reader and system_scope:all'
+SYSTEM_READER = '(role:reader and system_scope:all) or (role:service and system_scope:all)' # noqa
# This check string is reserved for actions that require the highest level of
# authorization on a project or resources within the project (e.g., setting the
@@ -83,6 +83,14 @@ PROJECT_MEMBER = ('role:member and '
PROJECT_READER = ('role:reader and '
'(project_id:%(node.owner)s or project_id:%(node.lessee)s)')
+# This check string is used for granting access to other services which need
+# to communicate with Ironic, for example, Nova-Compute to provision nodes,
+# or Ironic-Inspector to create nodes. The idea behind the service role is
+# that it has restricted access to perform operations, limited to remote,
+# automated, inter-service processes.
+SYSTEM_SERVICE = ('role:service and system_scope:all')
+PROJECT_SERVICE = ('role:service and project_id:%(node.owner)s')
+
# The following are common composite check strings that are useful for
# protecting APIs designed to operate with multiple scopes (e.g., a system
# administrator should be able to delete any baremetal host in the deployment,
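
As a standalone illustration (not part of the patch) of how the new base check strings behave, oslo.policy can evaluate ``PROJECT_SERVICE`` directly; the rule name and project IDs below are placeholders.

    # Hedged sketch: evaluating the PROJECT_SERVICE check string with
    # oslo.policy. Rule name and project IDs are placeholders.
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_default(oslo_policy.RuleDefault(
        'demo:project_service',
        'role:service and project_id:%(node.owner)s'))

    target = {'node.owner': 'OWNER-PROJECT-ID'}

    # A service-role token scoped to the owning project satisfies the rule.
    assert enforcer.enforce('demo:project_service', target,
                            {'roles': ['service'],
                             'project_id': 'OWNER-PROJECT-ID'})

    # The same role in an unrelated project does not.
    assert not enforcer.enforce('demo:project_service', target,
                                {'roles': ['service'],
                                 'project_id': 'OTHER-PROJECT-ID'})

In the composite strings that follow, this project-scoped service check is OR'd with the system-scoped variants, so either form of service credential can reach the same APIs.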
@@ -91,7 +99,7 @@ SYSTEM_OR_PROJECT_MEMBER = (
'(' + SYSTEM_MEMBER + ') or (' + PROJECT_MEMBER + ')'
)
SYSTEM_OR_PROJECT_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
PROJECT_OWNER_ADMIN = ('role:admin and project_id:%(node.owner)s')
@@ -109,28 +117,36 @@ ALLOCATION_OWNER_MANAGER = ('role:manager and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_MEMBER = ('role:member and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_READER = ('role:reader and project_id:%(allocation.owner)s')
+# Used for general operations like changing provision state.
SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used for creation and deletion of network ports.
SYSTEM_ADMIN_OR_OWNER_ADMIN = (
- '(' + SYSTEM_ADMIN + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map system members and owner admins to the same access rights.
+# These are actions such as updating driver interfaces and deleting ports.
SYSTEM_MEMBER_OR_OWNER_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map "member" only rights, i.e. those of "users using a deployment"
SYSTEM_MEMBER_OR_OWNER_MEMBER = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ')'
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used throughout to map where authenticated readers
+# should be able to read API objects.
SYSTEM_OR_OWNER_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_OWNER_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Mainly used for targets/connectors
SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
@@ -152,7 +168,10 @@ ALLOCATION_CREATOR = (
# Special purpose aliases for things like "ability to access the API
# as a reader, or permission checking that does not require node
# owner relationship checking
-API_READER = ('role:reader')
+API_READER = ('(role:reader) or (role:service)')
+
+# Used for ability to view target properties of a volume, which is
+# considered highly restricted.
TARGET_PROPERTIES_READER = (
'(' + SYSTEM_READER + ') or (role:admin)'
)
@@ -436,7 +455,7 @@ deprecated_bios_disable_cleaning = policy.DeprecatedRule(
node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:create',
- check_str=SYSTEM_ADMIN,
+ check_str='(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ')',
scope_types=['system', 'project'],
description='Create Node records',
operations=[{'path': '/nodes', 'method': 'POST'}],
@@ -444,7 +463,7 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:create:self_owned_node',
- check_str=('role:admin'),
+ check_str=('(role:admin) or (role:service)'),
scope_types=['project'],
description='Create node records which will be tracked '
'as owned by the associated user project.',
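
To see the effect of the widened node-create default outside of ironic, a rule with the same shape can be registered and enforced directly with oslo.policy; the rule name below is illustrative, not ironic's registered default.

    # Hedged sketch: a rule shaped like the updated node-create default.
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    SYSTEM_ADMIN = 'role:admin and system_scope:all'
    SYSTEM_SERVICE = 'role:service and system_scope:all'

    rule = oslo_policy.DocumentedRuleDefault(
        name='demo:node:create',
        check_str='(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ')',
        description='Create Node records',
        operations=[{'path': '/nodes', 'method': 'POST'}],
        scope_types=['system', 'project'],
    )

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_default(rule)

    # A system-scoped token carrying the service role is now allowed.
    assert enforcer.enforce('demo:node:create', {},
                            {'roles': ['service'], 'system_scope': 'all'})

The project-scoped ``role:service`` path for self-owned node creation is exercised by the project-scoped tests below.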
diff --git a/ironic/tests/unit/api/test_rbac_project_scoped.yaml b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
index b57f7fc5c..ad3342e86 100644
--- a/ironic/tests/unit/api/test_rbac_project_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
@@ -74,6 +74,14 @@ values:
X-Auth-Token: 'third-party-admin-token'
X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
X-Roles: admin,manager,member,reader
+ service_headers: &service_headers
+ X-Auth-Token: 'service-token'
+ X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
+ X-Roles: service
+ service_headers_owner_project: &service_headers_owner_project
+ X-Auth-Token: 'service-token'
+ X-Project-Id: 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
+ X-Roles: service
owner_project_id: &owner_project_id 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
lessee_project_id: &lessee_project_id f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
owned_node_ident: &owned_node_ident f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
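
The anchors above feed ironic's data-driven RBAC tests, where each YAML entry names a path, method, headers and an expected status. Purely as a hypothetical sketch of that pattern (not ironic's actual harness), one scenario could be replayed against a running API like this; the ``requests`` client, base URL and node body are assumptions.

    # Hypothetical replay of one scenario; not ironic's real test harness.
    import requests

    scenario = {
        'path': '/v1/nodes',
        'method': 'post',
        'headers': {'X-Auth-Token': 'service-token',
                    'X-Project-Id': 'ae64129e-b188-4662-b014-4127f4366ee6',
                    'X-Roles': 'service'},
        'body': {'driver': 'fake-hardware'},   # assumed request body
        'assert_status': 403,
    }

    resp = getattr(requests, scenario['method'])(
        'http://127.0.0.1:6385' + scenario['path'],   # assumed API endpoint
        headers=scenario['headers'],
        json=scenario['body'])
    assert resp.status_code == scenario['assert_status']

The entries that follow exercise the new role both from the owning project and from an unrelated one.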
@@ -100,6 +108,22 @@ owner_admin_can_post_nodes:
assert_status: 503
self_manage_nodes: True
+service_cannot_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: False
+
+service_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
+
owner_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
@@ -716,6 +740,18 @@ owner_admin_can_delete_nodes:
assert_status: 503
self_manage_nodes: True
+service_cannot_delete_owner_admin_nodes:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
+service_can_delete_nodes_in_own_project:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_manager_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
method: delete
@@ -1306,7 +1342,6 @@ lessee_manager_can_change_provision_state:
body: *provision_body
assert_status: 503
-
lessee_member_cannot_change_provision_state:
path: '/v1/nodes/{lessee_node_ident}/states/provision'
method: put
@@ -1321,6 +1356,20 @@ third_party_admin_cannot_change_provision_state:
body: *provision_body
assert_status: 404
+service_can_change_provision_state_for_own_nodes:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers_owner_project
+ body: *provision_body
+ assert_status: 503
+
+service_cannot_change_provision_state:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 404
+
# Raid configuration
owner_admin_can_set_raid_config:
@@ -1363,6 +1412,13 @@ owner_member_can_set_raid_config:
body: *raid_body
assert_status: 503
+owner_service_can_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers_owner_project
+ body: *raid_body
+ assert_status: 503
+
lessee_member_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
method: put
@@ -1377,6 +1433,14 @@ third_party_admin_cannot_set_raid_config:
body: *raid_body
assert_status: 404
+service_cannot_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers
+ body: *raid_body
+ assert_status: 404
+
+
# Console
owner_admin_can_get_console:
@@ -1391,6 +1455,12 @@ owner_manager_can_get_console:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_get_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
@@ -1476,6 +1546,20 @@ lessee_member_cannot_set_console:
body: *console_body_put
assert_status: 403
+owner_service_can_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers_owner_project
+ body: *console_body_put
+ assert_status: 503
+
+service_cannot_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 404
+
# Vendor Passthru - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# owner/lessee vendor passthru methods inaccessible
@@ -1494,6 +1578,12 @@ owner_manager_cannot_get_vendor_passthru_methods:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru_methods:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
method: get
@@ -1543,6 +1633,12 @@ owner_manager_cannot_get_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: get
@@ -1593,6 +1689,12 @@ owner_manager_cannot_post_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_post_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_post_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: post
@@ -1643,6 +1745,12 @@ owner_manager_cannot_put_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_put_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_put_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: put
@@ -1693,6 +1801,12 @@ owner_manager_cannot_delete_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_delete_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_delete_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: delete
@@ -1737,6 +1851,12 @@ owner_reader_get_traits:
headers: *owner_reader_headers
assert_status: 200
+owner_service_get_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_get_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: get
@@ -1766,6 +1886,13 @@ owner_manager_can_put_traits:
assert_status: 503
body: *traits_body
+owner_service_can_put_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *traits_body
+
owner_member_cannot_put_traits:
path: '/v1/nodes/{owner_node_ident}/traits'
method: put
@@ -1801,6 +1928,13 @@ third_party_admin_cannot_put_traits:
assert_status: 404
body: *traits_body
+service_cannot_put_traits:
+ path: '/v1/nodes/{lessee_node_ident}/traits'
+ method: put
+ headers: *service_headers
+ assert_status: 404
+ body: *traits_body
+
owner_admin_can_delete_traits:
path: '/v1/nodes/{owner_node_ident}/traits/{trait}'
method: delete
@@ -1917,6 +2051,21 @@ owner_admin_can_post_vifs:
body: &vif_body
id: ee21d58f-5de2-4956-85ff-33935ea1ca00
+service_can_post_vifs_for_own_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *vif_body
+
+service_cannot_post_vifs_for_other_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ # NOTE(TheJulia): This is a 404 because the node should not be visible.
+ assert_status: 404
+ body: *vif_body
+
owner_manager_can_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: post
@@ -2015,6 +2164,18 @@ third_party_admin_cannot_delete_vifs:
headers: *third_party_admin_headers
assert_status: 404
+service_can_delete_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
+service_cannot_delete_other_nodes_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
owner_readers_can_get_indicators:
path: '/v1/nodes/{owner_node_ident}/management/indicators'
@@ -2078,6 +2239,14 @@ owner_reader_can_list_portgroups:
assert_list_length:
portgroups: 2
+owner_service_can_list_portgroups:
+ path: '/v1/portgroups'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ portgroups: 2
+
lessee_reader_can_list_portgroups:
path: '/v1/portgroups'
method: get
@@ -2122,6 +2291,13 @@ owner_admin_can_add_portgroup:
node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
assert_status: 201
+owner_service_can_add_portgroup:
+ path: '/v1/portgroups'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_portgroup_body
+ assert_status: 201
+
owner_manager_can_add_portgroup:
path: '/v1/portgroups'
method: post
@@ -2237,6 +2413,12 @@ owner_member_cannot_delete_portgroup:
headers: *owner_member_headers
assert_status: 403
+owner_service_can_delete_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
@@ -2261,6 +2443,12 @@ third_party_admin_cannot_delete_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_portgroup:
+ path: '/v1/portgroups/{lessee_portgroup_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Portgroups by node - https://docs.openstack.org/api-ref/baremetal/#listing-portgroups-by-node-nodes-portgroups
owner_reader_can_get_node_portgroups:
@@ -2281,6 +2469,13 @@ third_party_admin_cannot_get_portgroups:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_portgroups:
+ path: '/v1/nodes/{lessee_node_ident}/portgroups'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Ports - https://docs.openstack.org/api-ref/baremetal/#ports-ports
# Based on ports_* tests
@@ -2294,6 +2489,15 @@ owner_reader_can_list_ports:
assert_list_length:
ports: 3
+owner_service_can_list_ports:
+ path: '/v1/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ # Two ports owned, one on the leased node. 1 invisible.
+ assert_list_length:
+ ports: 3
+
lessee_reader_can_list_ports:
path: '/v1/ports'
method: get
@@ -2316,6 +2520,12 @@ owner_reader_can_read_port:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_read_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_read_port:
path: '/v1/ports/{lessee_port_ident}'
method: get
@@ -2362,6 +2572,13 @@ owner_manager_cannot_add_ports_to_other_nodes:
body: *other_node_add_port_body
assert_status: 403
+owner_service_cannot_add_ports_to_other_nodes:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *other_node_add_port_body
+ assert_status: 403
+
owner_member_cannot_add_port:
path: '/v1/ports'
method: post
@@ -2399,6 +2616,20 @@ third_party_admin_cannot_add_port:
body: *lessee_port_body
assert_status: 403
+service_can_add_port:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_port_body
+ assert_status: 503
+
+service_cannot_add_ports_to_other_project:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers
+ body: *owner_port_body
+ assert_status: 403
+
owner_admin_can_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2416,6 +2647,13 @@ owner_manager_can_modify_port:
body: *port_patch_body
assert_status: 503
+owner_service_can_modify_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: patch
+ headers: *service_headers_owner_project
+ body: *port_patch_body
+ assert_status: 503
+
owner_member_cannot_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2463,6 +2701,12 @@ owner_manager_can_delete_port:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
owner_member_cannot_delete_port:
path: '/v1/ports/{owner_port_ident}'
method: delete
@@ -2503,6 +2747,14 @@ owner_reader_can_get_node_ports:
assert_list_length:
ports: 2
+owner_service_can_get_node_ports:
+ path: '/v1/nodes/{owner_node_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ ports: 2
+
lessee_reader_can_get_node_port:
path: '/v1/nodes/{lessee_node_ident}/ports'
method: get
@@ -2517,6 +2769,12 @@ third_party_admin_cannot_get_ports:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports:
+ path: '/v1/nodes/{lessee_node_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Ports by portgroup - https://docs.openstack.org/api-ref/baremetal/#listing-ports-by-portgroup-portgroup-ports
# Based on portgroups_ports_get* tests
@@ -2527,6 +2785,12 @@ owner_reader_can_get_ports_by_portgroup:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_ports_by_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_ports_by_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}/ports'
method: get
@@ -2539,6 +2803,13 @@ third_party_admin_cannot_get_ports_by_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports_by_portgroup:
+ path: '/v1/portgroups/{other_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Volume(s) - https://docs.openstack.org/api-ref/baremetal/#volume-volume
# TODO(TheJulia): volumes will likely need some level of exhaustive testing.
# i.e. ensure that the volume is permissible. However this may not be possible
@@ -2587,6 +2858,13 @@ owner_manager_can_post_volume_connector:
assert_status: 201
body: *volume_connector_body
+owner_service_can_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_connector_body
+
lessee_admin_cannot_post_volume_connector:
path: '/v1/volume/connectors'
method: post
@@ -2608,6 +2886,13 @@ third_party_admin_cannot_post_volume_connector:
assert_status: 403
body: *volume_connector_body
+service_cannot_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 403
+ body: *volume_connector_body
+
owner_reader_can_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -2698,6 +2983,12 @@ owner_manager_can_delete_volume_connectors:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_connectors:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -2716,6 +3007,12 @@ third_party_admin_cannot_delete_volume_connector:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_connector:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Volume targets
# TODO(TheJulia): Create at least 3 targets.
@@ -2776,6 +3073,13 @@ owner_admin_create_volume_target:
boot_index: 2
volume_id: 'test-id'
+owner_service_create_volume_target:
+ path: '/v1/volume/targets'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_target_body
+
owner_manager_create_volume_target:
path: '/v1/volume/targets'
method: post
@@ -2826,6 +3130,13 @@ owner_member_can_patch_volume_target:
headers: *owner_member_headers
assert_status: 503
+owner_service_can_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: patch
@@ -2854,6 +3165,13 @@ third_party_admin_cannot_patch_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 404
+
owner_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2866,6 +3184,12 @@ owner_manager_can_delete_volume_target:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2896,6 +3220,12 @@ third_party_admin_cannot_delete_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
owner_reader_can_get_volume_connectors:
@@ -2904,6 +3234,12 @@ owner_reader_can_get_volume_connectors:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_volume_connectors:
+ path: '/v1/nodes/{owner_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_connectors:
path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
method: get
@@ -2916,12 +3252,24 @@ third_party_admin_cannot_get_node_volume_connectors:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_node_volume_connectors:
+ path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
owner_reader_can_get_node_volume_targets:
path: '/v1/nodes/{owner_node_ident}/volume/targets'
method: get
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_node_volume_targets:
+ path: '/v1/nodes/{owner_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_targets:
path: '/v1/nodes/{lessee_node_ident}/volume/targets'
method: get
@@ -2934,6 +3282,12 @@ third_part_admin_cannot_read_node_volume_targets:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_read_node_volume_targets:
+ path: '/v1/nodes/{lessee_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Drivers - https://docs.openstack.org/api-ref/baremetal/#drivers-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2956,6 +3310,12 @@ third_party_admin_cannot_get_drivers:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers:
+ path: '/v1/drivers'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Driver vendor passthru - https://docs.openstack.org/api-ref/baremetal/#driver-vendor-passthru-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2978,6 +3338,12 @@ third_party_admin_cannot_get_drivers_vendor_passthru:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers_vendor_passthru:
+ path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Node Bios - https://docs.openstack.org/api-ref/baremetal/#node-bios-nodes
owner_reader_can_get_bios_setttings:
@@ -2998,6 +3364,18 @@ third_party_admin_cannot_get_bios_settings:
headers: *third_party_admin_headers
assert_status: 404
+service_can_get_bios_setttings_owner_project:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
+service_cannot_get_bios_setttings:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Conductors - https://docs.openstack.org/api-ref/baremetal/#allocations-allocations
# This is a system scoped endpoint, everything should fail in this section.
@@ -3271,7 +3649,7 @@ third_party_admin_cannot_get_deploy_templates:
third_party_admin_cannot_post_deploy_template:
path: '/v1/deploy_templates'
method: post
- body:
+ body: &deploy_template
name: 'CUSTOM_TEST_TEMPLATE'
steps:
- interface: 'deploy'
@@ -3281,6 +3659,19 @@ third_party_admin_cannot_post_deploy_template:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_deploy_templates:
+ path: '/v1/deploy_templates'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_post_deploy_template:
+ path: '/v1/deploy_templates'
+ method: post
+ body: *deploy_template
+ headers: *service_headers
+ assert_status: 500
+
# Chassis endpoints - https://docs.openstack.org/api-ref/baremetal/#chassis-chassis
# This is a system scoped endpoint, everything should fail in this section.
@@ -3311,6 +3702,20 @@ third_party_admin_cannot_create_chassis:
description: 'test-chassis'
assert_status: 500
+service_cannot_access_chassis:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_create_chassis:
+ path: '/v1/chassis'
+ method: post
+ headers: *service_headers
+ body:
+ description: 'test-chassis'
+ assert_status: 500
+
# Node history entries
node_history_get_admin:
@@ -3337,6 +3742,20 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
+node_history_get_service_cannot_be_retrieved:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
node_history_get_entry_admin:
path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
method: get
@@ -3391,6 +3810,12 @@ lessee_node_history_get_entry_reader:
headers: *lessee_reader_headers
assert_status: 404
+owner_service_node_history_get_entry_reader:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
third_party_admin_cannot_get_node_history:
path: '/v1/nodes/{owner_node_ident}'
method: get
@@ -3403,6 +3828,12 @@ node_history_get_entry_admin:
headers: *third_party_admin_headers
assert_status: 404
+node_history_get_entry_service:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Node inventory support
node_inventory_get_admin:
diff --git a/ironic/tests/unit/api/test_rbac_system_scoped.yaml b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
index 533356217..a980fefc7 100644
--- a/ironic/tests/unit/api/test_rbac_system_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
@@ -23,6 +23,10 @@ values:
X-Project-ID: a1111111111111111111111111111111
X-Roles: admin
X-Project-Name: 'other-project'
+ service_headers: &service_headers
+ X-Auth-Token: 'baremetal-service-token'
+ X-Roles: service
+ OpenStack-System-Scope: all
owner_project_id: &owner_project_id '{owner_project_id}'
other_project_id: &other_project_id '{other_project_id}'
node_ident: &node_ident '{node_ident}'
@@ -52,6 +56,13 @@ nodes_post_reader:
body: *node_post_body
assert_status: 403
+nodes_post_service:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+
nodes_get_node_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -92,6 +103,14 @@ nodes_get_admin:
nodes: 3
assert_status: 200
+nodes_get_service:
+ path: '/v1/nodes'
+ method: get
+ headers: *service_headers
+ assert_list_length:
+ nodes: 3
+ assert_status: 200
+
nodes_get_other_admin:
path: '/v1/nodes'
method: get
@@ -119,6 +138,12 @@ nodes_detail_get_reader:
headers: *reader_headers
assert_status: 200
+nodes_detail_get_service:
+ path: '/v1/nodes/detail'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
nodes_node_ident_get_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -187,6 +212,12 @@ nodes_node_ident_delete_admin:
headers: *admin_headers
assert_status: 503
+nodes_node_ident_delete_service:
+ path: '/v1/nodes/{node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
nodes_node_ident_delete_member:
path: '/v1/nodes/{node_ident}'
method: delete
@@ -337,7 +368,6 @@ nodes_management_inject_nmi_put_reader:
body: {}
assert_status: 403
-
nodes_states_get_admin:
path: '/v1/nodes/{node_ident}/states'
method: get
@@ -448,6 +478,13 @@ nodes_states_provision_put_reader:
body: *provision_body
assert_status: 403
+nodes_states_provision_put_service:
+ path: '/v1/nodes/{node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 503
+
nodes_states_raid_put_admin:
path: '/v1/nodes/{node_ident}/states/raid'
method: put
@@ -486,12 +523,18 @@ nodes_states_console_get_member:
headers: *scoped_member_headers
assert_status: 503
-nodes_states_console_get_admin:
+nodes_states_console_get_reader:
path: '/v1/nodes/{node_ident}/states/console'
method: get
headers: *reader_headers
assert_status: 403
+nodes_states_console_get_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_states_console_put_admin:
path: '/v1/nodes/{node_ident}/states/console'
method: put
@@ -514,6 +557,13 @@ nodes_states_console_put_reader:
body: *console_body_put
assert_status: 403
+nodes_states_console_put_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 503
+
# Node Traits - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# Calls conductor upon the get as a task is required.
@@ -729,6 +779,12 @@ nodes_vifs_get_reader:
headers: *reader_headers
assert_status: 503
+nodes_vifs_get_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_vifs_post_admin:
path: '/v1/nodes/{node_ident}/vifs'
method: post
@@ -751,6 +807,13 @@ nodes_vifs_post_reader:
assert_status: 403
body: *vif_body
+nodes_vifs_post_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ assert_status: 503
+ body: *vif_body
+
# This calls the conductor, hence not status 403.
nodes_vifs_node_vif_ident_delete_admin:
path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
@@ -770,6 +833,12 @@ nodes_vifs_node_vif_ident_delete_reader:
headers: *reader_headers
assert_status: 403
+nodes_vifs_node_vif_ident_delete_service:
+ path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
nodes_management_indicators_get_allow:
@@ -1182,6 +1251,12 @@ volume_get_reader:
headers: *reader_headers
assert_status: 200
+volume_get_service:
+ path: '/v1/volume'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# Volume connectors
volume_connectors_get_admin:
@@ -1202,6 +1277,12 @@ volume_connectors_get_reader:
headers: *reader_headers
assert_status: 200
+volume_connectors_get_service:
+ path: '/v1/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This ends up returning a 400 due to the
# UUID not already being in ironic.
volume_connectors_post_admin:
@@ -1230,6 +1311,13 @@ volume_connectors_post_reader:
assert_status: 403
body: *volume_connector_body
+volume_connectors_post_service:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 201
+ body: *volume_connector_body
+
volume_volume_connector_id_get_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -1272,6 +1360,13 @@ volume_volume_connector_id_patch_reader:
body: *connector_patch_body
assert_status: 403
+volume_volume_connector_id_patch_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: patch
+ headers: *service_headers
+ body: *connector_patch_body
+ assert_status: 503
+
volume_volume_connector_id_delete_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -1290,6 +1385,12 @@ volume_volume_connector_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_connector_id_delete_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Volume targets
volume_targets_get_admin:
@@ -1310,6 +1411,12 @@ volume_targets_get_reader:
headers: *reader_headers
assert_status: 200
+volume_targets_get_service:
+ path: '/v1/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): Because we can't seem to get the uuid
# to load from an existing uuid, since we're not substituting
# it, this will return with 400 due to the ID not matching.
@@ -1360,6 +1467,12 @@ volume_volume_target_id_get_reader:
headers: *reader_headers
assert_status: 200
+volume_volume_target_id_get_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This triggers a call to the conductor and
# thus will fail, but does not return a 403 which means success.
volume_volume_target_id_patch_admin:
@@ -1386,6 +1499,13 @@ volume_volume_target_id_patch_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_patch_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 503
+
volume_volume_target_id_delete_admin:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -1404,6 +1524,12 @@ volume_volume_target_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_delete_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
nodes_volume_get_admin:
@@ -2002,6 +2128,12 @@ chassis_get_reader:
headers: *reader_headers
assert_status: 200
+chassis_get_service:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
chassis_detail_get_admin:
path: '/v1/chassis/detail'
method: get
@@ -2080,6 +2212,12 @@ chassis_chassis_id_delete_reader:
headers: *reader_headers
assert_status: 403
+chassis_chassis_id_delete_service:
+ path: '/v1/chassis/{chassis_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
# Node history entries
node_history_get_admin:
@@ -2106,6 +2244,14 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
node_history_get_entry_admin:
path: '/v1/nodes/{node_ident}/history/{history_ident}'
method: get
@@ -2137,3 +2283,9 @@ node_inventory_get_reader:
method: get
headers: *reader_headers
assert_status: 200
+
+node_history_get_entry_service:
+ path: '/v1/nodes/{node_ident}/history/{history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200