-rw-r--r--  api-ref/source/parameters.yaml | 8
-rwxr-xr-x  devstack/tools/ironic/scripts/cirros-partition.sh | 2
-rw-r--r--  doc/source/admin/secure-rbac.rst | 27
-rw-r--r--  doc/source/contributor/releasing.rst | 20
-rw-r--r--  ironic/api/controllers/v1/node.py | 18
-rw-r--r--  ironic/common/policy.py | 41
-rw-r--r--  ironic/common/rpc.py | 7
-rw-r--r--  ironic/conf/__init__.py | 2
-rw-r--r--  ironic/conf/default.py | 2
-rw-r--r--  ironic/conf/inspector.py | 11
-rw-r--r--  ironic/conf/inventory.py | 34
-rw-r--r--  ironic/conf/opts.py | 1
-rw-r--r--  ironic/drivers/modules/inspect_utils.py | 87
-rw-r--r--  ironic/drivers/modules/inspector.py | 61
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_node.py | 12
-rw-r--r--  ironic/tests/unit/api/test_rbac_project_scoped.yaml | 435
-rw-r--r--  ironic/tests/unit/api/test_rbac_system_scoped.yaml | 156
-rw-r--r--  ironic/tests/unit/drivers/modules/test_inspect_utils.py | 121
-rw-r--r--  ironic/tests/unit/drivers/modules/test_inspector.py | 54
-rw-r--r--  ironic/tests/unit/drivers/modules/test_snmp.py | 4
-rw-r--r--  releasenotes/config.yaml | 5
-rw-r--r--  releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml | 13
-rw-r--r--  releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml | 14
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po | 354
-rw-r--r--  reno.yaml | 4
-rw-r--r--  requirements.txt | 2
-rw-r--r--  zuul.d/ironic-jobs.yaml | 6
27 files changed, 1096 insertions, 405 deletions
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index b55ef405f..361cca531 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -343,13 +343,17 @@ r_port_node_ident:
description: |
Filter the list of returned Ports, and only return the ones associated
with this specific node (name or UUID), or an empty set if not found.
+ This filter takes precedence over all other filters, and cannot be set at
+ the same time as node_uuid or portgroup.
in: query
required: false
type: string
r_port_node_uuid:
description: |
Filter the list of returned Ports, and only return the ones associated
- with this specific node UUID, or an empty set if not found.
+ with this specific node UUID, or an empty set if not found. This filter
+ takes precedence over all other filters, and cannot be set at the same
+ time as node or portgroup.
in: query
required: false
type: string
@@ -357,6 +361,8 @@ r_port_portgroup_ident:
description: |
Filter the list of returned Ports, and only return the ones associated
with this specific Portgroup (name or UUID), or an empty set if not found.
+ This filter takes precedence over all other filters, and cannot be set at
+ the same time as node_uuid or node.
in: query
required: false
type: string
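
As a rough illustration of the filtering behaviour described above (the node and portgroup identifiers are hypothetical), the filters are used one at a time:

    GET /v1/ports?node=node-1                    filter by node name or UUID
    GET /v1/ports?node_uuid=<node uuid>          filter by node UUID
    GET /v1/ports?portgroup=pg-1                 filter by portgroup name or UUID
    GET /v1/ports?node=node-1&portgroup=pg-1     not allowed: these filters cannot be combined
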
diff --git a/devstack/tools/ironic/scripts/cirros-partition.sh b/devstack/tools/ironic/scripts/cirros-partition.sh
index 40c87b19e..facf9b030 100755
--- a/devstack/tools/ironic/scripts/cirros-partition.sh
+++ b/devstack/tools/ironic/scripts/cirros-partition.sh
@@ -8,7 +8,7 @@ if [[ "$VERBOSE" == True ]]; then
guestfish_args="--verbose"
fi
-CIRROS_VERSION=${CIRROS_VERSION:-0.5.2}
+CIRROS_VERSION=${CIRROS_VERSION:-0.6.1}
CIRROS_ARCH=${CIRROS_ARCH:-x86_64}
# TODO(dtantsur): use the image cached on infra images in the CI
DISK_URL=http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img
diff --git a/doc/source/admin/secure-rbac.rst b/doc/source/admin/secure-rbac.rst
index 7721211b6..1f1bb66d1 100644
--- a/doc/source/admin/secure-rbac.rst
+++ b/doc/source/admin/secure-rbac.rst
@@ -280,3 +280,30 @@ imparts ``owner`` privileges to the created Bare Metal node.
This functionality can be disabled by setting
``[api]project_admin_can_manage_own_nodes`` to ``False``.
+
+Can I use a service role?
+-------------------------
+
+In later versions of Ironic, a ``service`` role has been added to allow
+service accounts to be delineated from other access to Ironic's API. As
+Ironic's API was originally intended largely as an "admin" API service,
+the service role grants a level of access similar to that of a
+project-scoped user with the ``admin`` or ``manager`` role.
+
+In terms of access, this is best viewed as the ``manager`` role with a
+slight elevation in privilege, intended for use of the service through
+a service account.
+
+A project-scoped user with the ``service`` role is able to create
+baremetal nodes, but is not able to delete them. To disable the
+ability to create nodes, set the
+``[api]project_admin_can_manage_own_nodes`` setting to ``False``.
+The nodes which can be accessed and managed in the project scope also
+align with the ``owner`` and ``lessee`` access model, so if no nodes
+match the user's ``project_id``, Ironic's API will appear to have no
+enrolled baremetal nodes.
+
+With the system scope, a user with the ``service`` role is likewise able
+to create baremetal nodes but not delete them. The access rights are
+modeled such that a system-scoped ``admin`` is needed to delete baremetal
+nodes from Ironic.
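
A minimal sketch of the setting mentioned above, as it would appear in ironic.conf, disabling project-scoped node creation (including creation by the ``service`` role):

    [api]
    project_admin_can_manage_own_nodes = False
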
diff --git a/doc/source/contributor/releasing.rst b/doc/source/contributor/releasing.rst
index eab0649c6..3c0732c5b 100644
--- a/doc/source/contributor/releasing.rst
+++ b/doc/source/contributor/releasing.rst
@@ -125,19 +125,23 @@ openstack ``stable/NAME`` branches:
* ironic-inspector
* ironic-python-agent
-They are also released on a regular cadence as opposed to on-demand, namely
-three times a release cycle (roughly a release every 2 months). One of the
-releases corresponds to the coordinated OpenStack released and receives a
-``stable/NAME`` branch. The other two happen during the cycle and receive a
-``bugfix/X.Y`` branch, where ``X.Y`` consists of the major and the minor
-component of the version (e.g. ``bugfix/8.1`` for 8.1.0).
+These projects are released every six months as part of the coordinated
+OpenStack release. These releases can be found in a ``stable/NAME``
+branch.
+
+They are also evaluated for additional bugfix releases at the two and four
+month milestones between scheduled stable releases (roughly every two
+months). These releases can be found in a ``bugfix/X.Y`` branch.
+A bugfix release is only created if there are significant beneficial
+changes and a known downstream operator or distributor will consume
+the release.
To leave some version space for releases from these branches, releases of these
projects from the master branch always increase either the major or the minor
version.
-Currently releases from bugfix branches cannot be automated and must be done by
-the release team manually.
+Currently releases and retirements from bugfix branches cannot be automated and
+must be done by the release team manually.
After the creation of a bugfix branch it is of utmost importance to update the
upper-constraints link for the tests in the tox.ini file, plus override the
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index fc6d70481..312143500 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -48,7 +48,7 @@ from ironic.common import states as ir_states
from ironic.conductor import steps as conductor_steps
import ironic.conf
from ironic.drivers import base as driver_base
-from ironic.drivers.modules import inspector as inspector
+from ironic.drivers.modules import inspect_utils
from ironic import objects
@@ -1951,11 +1951,6 @@ class NodeInventoryController(rest.RestController):
super(NodeInventoryController).__init__()
self.node_ident = node_ident
- def _node_inventory_convert(self, node_inventory):
- inventory_data = node_inventory['inventory_data']
- plugin_data = node_inventory['plugin_data']
- return {"inventory": inventory_data, "plugin_data": plugin_data}
-
@METRICS.timer('NodeInventoryController.get')
@method.expose()
@args.validate(node_ident=args.uuid_or_name)
@@ -1966,16 +1961,7 @@ class NodeInventoryController(rest.RestController):
"""
node = api_utils.check_node_policy_and_retrieve(
'baremetal:node:inventory:get', self.node_ident)
- store_data = CONF.inspector.inventory_data_backend
- if store_data == 'none':
- raise exception.NotFound(
- (_("Cannot obtain node inventory because it was not stored")))
- if store_data == 'database':
- node_inventory = objects.NodeInventory.get_by_node_id(
- api.request.context, node.id)
- return self._node_inventory_convert(node_inventory)
- if store_data == 'swift':
- return inspector.get_introspection_data(node.uuid)
+ return inspect_utils.get_introspection_data(node, api.request.context)
class NodesController(rest.RestController):
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index afce51c77..39733c732 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -57,7 +57,7 @@ SYSTEM_MEMBER = 'role:member and system_scope:all'
# support. These users are also able to view project-specific resources where
# applicable (e.g., listing all volumes in the deployment, regardless of the
# project they belong to).
-SYSTEM_READER = 'role:reader and system_scope:all'
+SYSTEM_READER = '(role:reader and system_scope:all) or (role:service and system_scope:all)' # noqa
# This check string is reserved for actions that require the highest level of
# authorization on a project or resources within the project (e.g., setting the
@@ -83,6 +83,14 @@ PROJECT_MEMBER = ('role:member and '
PROJECT_READER = ('role:reader and '
'(project_id:%(node.owner)s or project_id:%(node.lessee)s)')
+# This check string is used to grant access to other services which need
+# to communicate with Ironic, for example, Nova-Compute to provision nodes
+# or Ironic-Inspector to create nodes. The idea behind the service role is
+# an account with restricted access, limited to the operations needed by
+# remote, automated, inter-service processes.
+SYSTEM_SERVICE = ('role:service and system_scope:all')
+PROJECT_SERVICE = ('role:service and project_id:%(node.owner)s')
+
# The following are common composite check strings that are useful for
# protecting APIs designed to operate with multiple scopes (e.g., a system
# administrator should be able to delete any baremetal host in the deployment,
@@ -91,7 +99,7 @@ SYSTEM_OR_PROJECT_MEMBER = (
'(' + SYSTEM_MEMBER + ') or (' + PROJECT_MEMBER + ')'
)
SYSTEM_OR_PROJECT_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
PROJECT_OWNER_ADMIN = ('role:admin and project_id:%(node.owner)s')
@@ -109,28 +117,36 @@ ALLOCATION_OWNER_MANAGER = ('role:manager and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_MEMBER = ('role:member and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_READER = ('role:reader and project_id:%(allocation.owner)s')
+# Used for general operations like changing provision state.
SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used for creation and deletion of network ports.
SYSTEM_ADMIN_OR_OWNER_ADMIN = (
- '(' + SYSTEM_ADMIN + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map system members and owner admins to the same access rights,
+# covering actions such as updating driver interfaces and deleting ports.
SYSTEM_MEMBER_OR_OWNER_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map "member"-only rights, i.e. those of users using a deployment.
SYSTEM_MEMBER_OR_OWNER_MEMBER = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ')'
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used throughout to indicate where authenticated readers
+# should be able to read API objects.
SYSTEM_OR_OWNER_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_OWNER_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Mainly used for targets/connectors
SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
@@ -152,7 +168,10 @@ ALLOCATION_CREATOR = (
# Special purpose aliases for things like "ability to access the API
# as a reader, or permission checking that does not require node
# owner relationship checking
-API_READER = ('role:reader')
+API_READER = ('(role:reader) or (role:service)')
+
+# Used for the ability to view target properties of a volume, which is
+# considered highly restricted.
TARGET_PROPERTIES_READER = (
'(' + SYSTEM_READER + ') or (role:admin)'
)
@@ -436,7 +455,7 @@ deprecated_bios_disable_cleaning = policy.DeprecatedRule(
node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:create',
- check_str=SYSTEM_ADMIN,
+ check_str='(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ')',
scope_types=['system', 'project'],
description='Create Node records',
operations=[{'path': '/nodes', 'method': 'POST'}],
@@ -444,7 +463,7 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:create:self_owned_node',
- check_str=('role:admin'),
+ check_str=('(role:admin) or (role:service)'),
scope_types=['project'],
description='Create node records which will be tracked '
'as owned by the associated user project.',
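
Operators can still override these defaults through oslo.policy. A hedged sketch of a policy file entry that removes the ``service`` role from self-owned node creation again, restoring the previous default (assuming the usual policy.yaml override file):

    "baremetal:node:create:self_owned_node": "role:admin"
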
diff --git a/ironic/common/rpc.py b/ironic/common/rpc.py
index 285ee1f06..710c7a943 100644
--- a/ironic/common/rpc.py
+++ b/ironic/common/rpc.py
@@ -122,10 +122,9 @@ def get_transport_url(url_str=None):
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer)
+ return messaging.get_rpc_client(
+ TRANSPORT, target, version_cap=version_cap,
+ serializer=serializer)
def get_server(target, endpoints, serializer=None):
diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py
index ad1ba227c..c1a893181 100644
--- a/ironic/conf/__init__.py
+++ b/ironic/conf/__init__.py
@@ -34,6 +34,7 @@ from ironic.conf import healthcheck
from ironic.conf import ibmc
from ironic.conf import ilo
from ironic.conf import inspector
+from ironic.conf import inventory
from ironic.conf import ipmi
from ironic.conf import irmc
from ironic.conf import metrics
@@ -69,6 +70,7 @@ healthcheck.register_opts(CONF)
ibmc.register_opts(CONF)
ilo.register_opts(CONF)
inspector.register_opts(CONF)
+inventory.register_opts(CONF)
ipmi.register_opts(CONF)
irmc.register_opts(CONF)
metrics.register_opts(CONF)
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index 0e3c32bd1..c7aff69cc 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -216,7 +216,7 @@ image_opts = [
'common/isolinux_config.template'),
help=_('Template file for isolinux configuration file.')),
cfg.StrOpt('grub_config_path',
- default='/boot/grub/grub.cfg',
+ default='EFI/BOOT/grub.cfg',
help=_('GRUB2 configuration file location on the UEFI ISO '
'images produced by ironic. The default value is '
'usually incorrect and should not be relied on. '
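
Expressed as configuration, the new default corresponds roughly to the following ironic.conf fragment (assuming the option remains in the ``[DEFAULT]`` group):

    [DEFAULT]
    grub_config_path = EFI/BOOT/grub.cfg
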
diff --git a/ironic/conf/inspector.py b/ironic/conf/inspector.py
index bee6a3e67..a7f89c994 100644
--- a/ironic/conf/inspector.py
+++ b/ironic/conf/inspector.py
@@ -39,17 +39,6 @@ opts = [
'managed by ironic. Set this to True if your '
'installation of ironic-inspector does not have a '
'separate PXE boot environment.')),
- cfg.StrOpt('inventory_data_backend',
- help=_('The storage backend for storing introspection data.'),
- choices=[('none', _('introspection data will not be stored')),
- ('database', _('introspection data stored in an SQL '
- 'database')),
- ('swift', _('introspection data stored in Swift'))],
- default='database'),
- cfg.StrOpt('swift_inventory_data_container',
- default='introspection_data_container',
- help=_('The Swift introspection data container to store '
- 'the inventory data.')),
]
diff --git a/ironic/conf/inventory.py b/ironic/conf/inventory.py
new file mode 100644
index 000000000..52f31bf60
--- /dev/null
+++ b/ironic/conf/inventory.py
@@ -0,0 +1,34 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.StrOpt('data_backend',
+ help=_('The storage backend for storing introspection data.'),
+ choices=[('none', _('introspection data will not be stored')),
+ ('database', _('introspection data stored in an SQL '
+ 'database')),
+ ('swift', _('introspection data stored in Swift'))],
+ default='database'),
+ cfg.StrOpt('swift_data_container',
+ default='introspection_data_container',
+ help=_('The Swift introspection data container to store '
+ 'the inventory data.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='inventory')
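
The new option group corresponds to configuration along these lines; a sketch of an ironic.conf fragment selecting the Swift backend (``database`` remains the default backend, and the container name shown is the default defined above):

    [inventory]
    data_backend = swift
    swift_data_container = introspection_data_container
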
diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py
index 7576264ee..fd2e51534 100644
--- a/ironic/conf/opts.py
+++ b/ironic/conf/opts.py
@@ -32,6 +32,7 @@ _opts = [
('healthcheck', ironic.conf.healthcheck.opts),
('ilo', ironic.conf.ilo.opts),
('inspector', ironic.conf.inspector.list_opts()),
+ ('inventory', ironic.conf.inventory.opts),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('anaconda', ironic.conf.anaconda.opts),
diff --git a/ironic/drivers/modules/inspect_utils.py b/ironic/drivers/modules/inspect_utils.py
index 89a13e658..1340d0d7d 100644
--- a/ironic/drivers/modules/inspect_utils.py
+++ b/ironic/drivers/modules/inspect_utils.py
@@ -17,9 +17,14 @@ from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import swift
+from ironic.conf import CONF
from ironic import objects
+from ironic.objects import node_inventory
LOG = logging.getLogger(__name__)
+_OBJECT_NAME_PREFIX = 'inspector_data'
def create_ports_if_not_exist(task, macs):
@@ -51,3 +56,85 @@ def create_ports_if_not_exist(task, macs):
except exception.MACAlreadyExists:
LOG.info("Port already exists for MAC address %(address)s "
"for node %(node)s", {'address': mac, 'node': node.uuid})
+
+
+def store_introspection_data(node, introspection_data, context):
+ # If store_data == 'none', do not store the data
+ store_data = CONF.inventory.data_backend
+ if store_data == 'none':
+ LOG.debug('Introspection data storage is disabled, the data will '
+ 'not be saved for node %(node)s', {'node': node.uuid})
+ return
+ inventory_data = introspection_data.pop("inventory")
+ plugin_data = introspection_data
+ if store_data == 'database':
+ node_inventory.NodeInventory(
+ context,
+ node_id=node.id,
+ inventory_data=inventory_data,
+ plugin_data=plugin_data).create()
+ LOG.info('Introspection data was stored in database for node '
+ '%(node)s', {'node': node.uuid})
+ if store_data == 'swift':
+ swift_object_name = _store_introspection_data_in_swift(
+ node_uuid=node.uuid,
+ inventory_data=inventory_data,
+ plugin_data=plugin_data)
+ LOG.info('Introspection data was stored for node %(node)s in Swift'
+ ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
+ {'node': node.uuid, 'obj_name': swift_object_name})
+
+
+def _node_inventory_convert(node_inventory):
+ inventory_data = node_inventory['inventory_data']
+ plugin_data = node_inventory['plugin_data']
+ return {"inventory": inventory_data, "plugin_data": plugin_data}
+
+
+def get_introspection_data(node, context):
+ store_data = CONF.inventory.data_backend
+ if store_data == 'none':
+ raise exception.NotFound(
+ (_("Cannot obtain node inventory because it was not stored")))
+ if store_data == 'database':
+ node_inventory = objects.NodeInventory.get_by_node_id(
+ context, node.id)
+ return _node_inventory_convert(node_inventory)
+ if store_data == 'swift':
+ return _get_introspection_data_from_swift(node.uuid)
+
+
+def _store_introspection_data_in_swift(node_uuid, inventory_data, plugin_data):
+ """Uploads introspection data to Swift.
+
+    :param node_uuid: UUID of the Ironic node that the data came from
+    :param inventory_data: hardware inventory data to store
+    :param plugin_data: plugin data to store
+ :returns: name of the Swift object that the data is stored in
+ """
+ swift_api = swift.SwiftAPI()
+ swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
+ container = CONF.inventory.swift_data_container
+ swift_api.create_object_from_data(swift_object_name + '-inventory',
+ inventory_data,
+ container)
+ swift_api.create_object_from_data(swift_object_name + '-plugin',
+ plugin_data,
+ container)
+ return swift_object_name
+
+
+def _get_introspection_data_from_swift(node_uuid):
+    """Get introspection data from Swift.
+
+    :param node_uuid: UUID of the Ironic node that the data came from
+    :returns: a dictionary with the inventory under the "inventory" key
+        and the plugin data under the "plugin_data" key
+ """
+ swift_api = swift.SwiftAPI()
+ swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
+ container = CONF.inventory.swift_data_container
+ inventory_data = swift_api.get_object(swift_object_name + '-inventory',
+ container)
+ plugin_data = swift_api.get_object(swift_object_name + '-plugin',
+ container)
+ return {"inventory": inventory_data, "plugin_data": plugin_data}
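
For orientation, the callers elsewhere in this change use the new helpers roughly as in this sketch, which restates the calls made in the node.py hunk above and the inspector.py hunk below; ``node``, ``task`` and the request context come from the usual API and conductor plumbing:

    from ironic.drivers.modules import inspect_utils

    # Conductor side: persist the data returned by ironic-inspector.
    inspect_utils.store_introspection_data(node, introspection_data, task.context)

    # API side: serve GET /v1/nodes/{node_ident}/inventory from the same backend.
    inventory = inspect_utils.get_introspection_data(node, api.request.context)
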
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py
index a4c8c1091..dbf171714 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector.py
@@ -28,7 +28,6 @@ from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.common import states
-from ironic.common import swift
from ironic.common import utils
from ironic.conductor import periodics
from ironic.conductor import task_manager
@@ -37,14 +36,12 @@ from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import inspect_utils
-from ironic.objects import node_inventory
LOG = logging.getLogger(__name__)
_INSPECTOR_SESSION = None
# Internal field to mark whether ironic or inspector manages boot for the node
_IRONIC_MANAGES_BOOT = 'inspector_manage_boot'
-_OBJECT_NAME_PREFIX = 'inspector_data'
def _get_inspector_session(**kwargs):
@@ -367,31 +364,15 @@ def _check_status(task):
_inspection_error_handler(task, error)
elif status.is_finished:
_clean_up(task)
- # If store_data == 'none', do not store the data
- store_data = CONF.inspector.inventory_data_backend
+ store_data = CONF.inventory.data_backend
if store_data == 'none':
LOG.debug('Introspection data storage is disabled, the data will '
'not be saved for node %(node)s', {'node': node.uuid})
return
introspection_data = inspector_client.get_introspection_data(
node.uuid, processed=True)
- inventory_data = introspection_data.pop("inventory")
- plugin_data = introspection_data
- if store_data == 'database':
- node_inventory.NodeInventory(
- node_id=node.id,
- inventory_data=inventory_data,
- plugin_data=plugin_data).create()
- LOG.info('Introspection data was stored in database for node '
- '%(node)s', {'node': node.uuid})
- if store_data == 'swift':
- swift_object_name = store_introspection_data(
- node_uuid=node.uuid,
- inventory_data=inventory_data,
- plugin_data=plugin_data)
- LOG.info('Introspection data was stored for node %(node)s in Swift'
- ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
- {'node': node.uuid, 'obj_name': swift_object_name})
+ inspect_utils.store_introspection_data(node, introspection_data,
+ task.context)
def _clean_up(task):
@@ -406,39 +387,3 @@ def _clean_up(task):
LOG.info('Inspection finished successfully for node %s',
task.node.uuid)
task.process_event('done')
-
-
-def store_introspection_data(node_uuid, inventory_data, plugin_data):
- """Uploads introspection data to Swift.
-
- :param data: data to store in Swift
- :param node_id: ID of the Ironic node that the data came from
- :returns: name of the Swift object that the data is stored in
- """
- swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
- container = CONF.inspector.swift_inventory_data_container
- swift_api.create_object_from_data(swift_object_name + '-inventory',
- inventory_data,
- container)
- swift_api.create_object_from_data(swift_object_name + '-plugin',
- plugin_data,
- container)
- return swift_object_name
-
-
-def get_introspection_data(node_uuid):
- """Uploads introspection data to Swift.
-
- :param data: data to store in Swift
- :param node_id: ID of the Ironic node that the data came from
- :returns: name of the Swift object that the data is stored in
- """
- swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
- container = CONF.inspector.swift_inventory_data_container
- inventory_data = swift_api.get_object(swift_object_name + '-inventory',
- container)
- plugin_data = swift_api.get_object(swift_object_name + '-plugin',
- container)
- return {"inventory": inventory_data, "plugin_data": plugin_data}
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index 2f880db7d..3c0049a73 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -43,6 +43,7 @@ from ironic.common import indicator_states
from ironic.common import policy
from ironic.common import states
from ironic.conductor import rpcapi
+from ironic.drivers.modules import inspect_utils
from ironic.drivers.modules import inspector
from ironic import objects
from ironic.objects import fields as obj_fields
@@ -7949,17 +7950,18 @@ class TestNodeInventory(test_api_base.BaseApiTest):
def test_get_inventory(self):
self._add_inventory()
- CONF.set_override('inventory_data_backend', 'database',
- group='inspector')
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
headers={api_base.Version.string: self.version})
self.assertEqual({'inventory': self.fake_inventory_data,
'plugin_data': self.fake_plugin_data}, ret)
- @mock.patch.object(inspector, 'get_introspection_data', autospec=True)
+ @mock.patch.object(inspect_utils, '_get_introspection_data_from_swift',
+ autospec=True)
def test_get_inventory_swift(self, mock_get_data):
- CONF.set_override('inventory_data_backend', 'swift',
- group='inspector')
+ CONF.set_override('data_backend', 'swift',
+ group='inventory')
mock_get_data.return_value = {"inventory": self.fake_inventory_data,
"plugin_data": self.fake_plugin_data}
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
diff --git a/ironic/tests/unit/api/test_rbac_project_scoped.yaml b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
index b57f7fc5c..ad3342e86 100644
--- a/ironic/tests/unit/api/test_rbac_project_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
@@ -74,6 +74,14 @@ values:
X-Auth-Token: 'third-party-admin-token'
X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
X-Roles: admin,manager,member,reader
+ service_headers: &service_headers
+ X-Auth-Token: 'service-token'
+ X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
+ X-Roles: service
+ service_headers_owner_project: &service_headers_owner_project
+ X-Auth-Token: 'service-token'
+ X-Project-Id: 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
+ X-Roles: service
owner_project_id: &owner_project_id 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
lessee_project_id: &lessee_project_id f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
owned_node_ident: &owned_node_ident f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
@@ -100,6 +108,22 @@ owner_admin_can_post_nodes:
assert_status: 503
self_manage_nodes: True
+service_cannot_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: False
+
+service_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
+
owner_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
@@ -716,6 +740,18 @@ owner_admin_can_delete_nodes:
assert_status: 503
self_manage_nodes: True
+service_cannot_delete_owner_admin_nodes:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
+service_cannot_delete_nodes_in_own_project:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_manager_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
method: delete
@@ -1306,7 +1342,6 @@ lessee_manager_can_change_provision_state:
body: *provision_body
assert_status: 503
-
lessee_member_cannot_change_provision_state:
path: '/v1/nodes/{lessee_node_ident}/states/provision'
method: put
@@ -1321,6 +1356,20 @@ third_party_admin_cannot_change_provision_state:
body: *provision_body
assert_status: 404
+service_can_change_provision_state_for_own_nodes:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers_owner_project
+ body: *provision_body
+ assert_status: 503
+
+service_cannot_change_provision_state:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 404
+
# Raid configuration
owner_admin_can_set_raid_config:
@@ -1363,6 +1412,13 @@ owner_member_can_set_raid_config:
body: *raid_body
assert_status: 503
+owner_service_can_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers_owner_project
+ body: *raid_body
+ assert_status: 503
+
lessee_member_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
method: put
@@ -1377,6 +1433,14 @@ third_party_admin_cannot_set_raid_config:
body: *raid_body
assert_status: 404
+service_cannot_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers
+ body: *raid_body
+ assert_status: 404
+
+
# Console
owner_admin_can_get_console:
@@ -1391,6 +1455,12 @@ owner_manager_can_get_console:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_get_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
@@ -1476,6 +1546,20 @@ lessee_member_cannot_set_console:
body: *console_body_put
assert_status: 403
+owner_service_can_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers_owner_project
+ body: *console_body_put
+ assert_status: 503
+
+service_cannot_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 404
+
# Vendor Passthru - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# owner/lessee vendor passthru methods inaccessible
@@ -1494,6 +1578,12 @@ owner_manager_cannot_get_vendor_passthru_methods:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru_methods:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
method: get
@@ -1543,6 +1633,12 @@ owner_manager_cannot_get_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: get
@@ -1593,6 +1689,12 @@ owner_manager_cannot_post_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_post_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_post_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: post
@@ -1643,6 +1745,12 @@ owner_manager_cannot_put_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_put_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_put_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: put
@@ -1693,6 +1801,12 @@ owner_manager_cannot_delete_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_delete_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_delete_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: delete
@@ -1737,6 +1851,12 @@ owner_reader_get_traits:
headers: *owner_reader_headers
assert_status: 200
+owner_service_get_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_get_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: get
@@ -1766,6 +1886,13 @@ owner_manager_can_put_traits:
assert_status: 503
body: *traits_body
+owner_service_can_put_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *traits_body
+
owner_member_cannot_put_traits:
path: '/v1/nodes/{owner_node_ident}/traits'
method: put
@@ -1801,6 +1928,13 @@ third_party_admin_cannot_put_traits:
assert_status: 404
body: *traits_body
+service_cannot_put_traits:
+ path: '/v1/nodes/{lessee_node_ident}/traits'
+ method: put
+ headers: *service_headers
+ assert_status: 404
+ body: *traits_body
+
owner_admin_can_delete_traits:
path: '/v1/nodes/{owner_node_ident}/traits/{trait}'
method: delete
@@ -1917,6 +2051,21 @@ owner_admin_can_post_vifs:
body: &vif_body
id: ee21d58f-5de2-4956-85ff-33935ea1ca00
+service_can_post_vifs_for_own_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *vif_body
+
+service_cannot_post_vifs_for_other_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ # NOTE(TheJulia): This is a 404 because the node should not be visible.
+ assert_status: 404
+ body: *vif_body
+
owner_manager_can_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: post
@@ -2015,6 +2164,18 @@ third_party_admin_cannot_delete_vifs:
headers: *third_party_admin_headers
assert_status: 404
+service_can_delete_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
+service_cannot_delete_other_nodes_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
owner_readers_can_get_indicators:
path: '/v1/nodes/{owner_node_ident}/management/indicators'
@@ -2078,6 +2239,14 @@ owner_reader_can_list_portgroups:
assert_list_length:
portgroups: 2
+owner_service_can_list_portgroups:
+ path: '/v1/portgroups'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ portgroups: 2
+
lessee_reader_can_list_portgroups:
path: '/v1/portgroups'
method: get
@@ -2122,6 +2291,13 @@ owner_admin_can_add_portgroup:
node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
assert_status: 201
+owner_service_can_add_portgroup:
+ path: '/v1/portgroups'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_portgroup_body
+ assert_status: 201
+
owner_manager_can_add_portgroup:
path: '/v1/portgroups'
method: post
@@ -2237,6 +2413,12 @@ owner_member_cannot_delete_portgroup:
headers: *owner_member_headers
assert_status: 403
+owner_service_can_delete_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
@@ -2261,6 +2443,12 @@ third_party_admin_cannot_delete_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_portgroup:
+ path: '/v1/portgroups/{lessee_portgroup_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Portgroups by node - https://docs.openstack.org/api-ref/baremetal/#listing-portgroups-by-node-nodes-portgroups
owner_reader_can_get_node_portgroups:
@@ -2281,6 +2469,13 @@ third_party_admin_cannot_get_portgroups:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_portgroups:
+ path: '/v1/nodes/{lessee_node_ident}/portgroups'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Ports - https://docs.openstack.org/api-ref/baremetal/#ports-ports
# Based on ports_* tests
@@ -2294,6 +2489,15 @@ owner_reader_can_list_ports:
assert_list_length:
ports: 3
+owner_service_can_list_ports:
+ path: '/v1/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ # Two ports owned, one on the leased node. 1 invisible.
+ assert_list_length:
+ ports: 3
+
lessee_reader_can_list_ports:
path: '/v1/ports'
method: get
@@ -2316,6 +2520,12 @@ owner_reader_can_read_port:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_read_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_read_port:
path: '/v1/ports/{lessee_port_ident}'
method: get
@@ -2362,6 +2572,13 @@ owner_manager_cannot_add_ports_to_other_nodes:
body: *other_node_add_port_body
assert_status: 403
+owner_service_cannot_add_ports_to_other_nodes:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *other_node_add_port_body
+ assert_status: 403
+
owner_member_cannot_add_port:
path: '/v1/ports'
method: post
@@ -2399,6 +2616,20 @@ third_party_admin_cannot_add_port:
body: *lessee_port_body
assert_status: 403
+service_can_add_port:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_port_body
+ assert_status: 503
+
+service_cannot_add_ports_to_other_project:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers
+ body: *owner_port_body
+ assert_status: 403
+
owner_admin_can_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2416,6 +2647,13 @@ owner_manager_can_modify_port:
body: *port_patch_body
assert_status: 503
+owner_service_can_modify_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: patch
+ headers: *service_headers_owner_project
+ body: *port_patch_body
+ assert_status: 503
+
owner_member_cannot_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2463,6 +2701,12 @@ owner_manager_can_delete_port:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
owner_member_cannot_delete_port:
path: '/v1/ports/{owner_port_ident}'
method: delete
@@ -2503,6 +2747,14 @@ owner_reader_can_get_node_ports:
assert_list_length:
ports: 2
+owner_service_can_get_node_ports:
+ path: '/v1/nodes/{owner_node_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ ports: 2
+
lessee_reader_can_get_node_port:
path: '/v1/nodes/{lessee_node_ident}/ports'
method: get
@@ -2517,6 +2769,12 @@ third_party_admin_cannot_get_ports:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports:
+ path: '/v1/nodes/{lessee_node_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Ports by portgroup - https://docs.openstack.org/api-ref/baremetal/#listing-ports-by-portgroup-portgroup-ports
# Based on portgroups_ports_get* tests
@@ -2527,6 +2785,12 @@ owner_reader_can_get_ports_by_portgroup:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_ports_by_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_ports_by_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}/ports'
method: get
@@ -2539,6 +2803,13 @@ third_party_admin_cannot_get_ports_by_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports_by_portgroup:
+ path: '/v1/portgroups/{other_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Volume(s) - https://docs.openstack.org/api-ref/baremetal/#volume-volume
# TODO(TheJulia): volumes will likely need some level of exhaustive testing.
# i.e. ensure that the volume is permissible. However this may not be possible
@@ -2587,6 +2858,13 @@ owner_manager_can_post_volume_connector:
assert_status: 201
body: *volume_connector_body
+owner_service_can_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_connector_body
+
lessee_admin_cannot_post_volume_connector:
path: '/v1/volume/connectors'
method: post
@@ -2608,6 +2886,13 @@ third_party_admin_cannot_post_volume_connector:
assert_status: 403
body: *volume_connector_body
+service_cannot_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 403
+ body: *volume_connector_body
+
owner_reader_can_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -2698,6 +2983,12 @@ owner_manager_can_delete_volume_connectors:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_connectors:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -2716,6 +3007,12 @@ third_party_admin_cannot_delete_volume_connector:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_connector:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Volume targets
# TODO(TheJulia): Create at least 3 targets.
@@ -2776,6 +3073,13 @@ owner_admin_create_volume_target:
boot_index: 2
volume_id: 'test-id'
+owner_service_create_volume_target:
+ path: '/v1/volume/targets'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_target_body
+
owner_manager_create_volume_target:
path: '/v1/volume/targets'
method: post
@@ -2826,6 +3130,13 @@ owner_member_can_patch_volume_target:
headers: *owner_member_headers
assert_status: 503
+owner_service_can_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: patch
@@ -2854,6 +3165,13 @@ third_party_admin_cannot_patch_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 404
+
owner_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2866,6 +3184,12 @@ owner_manager_can_delete_volume_target:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2896,6 +3220,12 @@ third_party_admin_cannot_delete_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
owner_reader_can_get_volume_connectors:
@@ -2904,6 +3234,12 @@ owner_reader_can_get_volume_connectors:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_volume_connectors:
+ path: '/v1/nodes/{owner_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_connectors:
path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
method: get
@@ -2916,12 +3252,24 @@ third_party_admin_cannot_get_node_volume_connectors:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_node_volume_connectors:
+ path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
owner_reader_can_get_node_volume_targets:
path: '/v1/nodes/{owner_node_ident}/volume/targets'
method: get
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_node_volume_targets:
+ path: '/v1/nodes/{owner_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_targets:
path: '/v1/nodes/{lessee_node_ident}/volume/targets'
method: get
@@ -2934,6 +3282,12 @@ third_part_admin_cannot_read_node_volume_targets:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_read_node_volume_targets:
+ path: '/v1/nodes/{lessee_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Drivers - https://docs.openstack.org/api-ref/baremetal/#drivers-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2956,6 +3310,12 @@ third_party_admin_cannot_get_drivers:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers:
+ path: '/v1/drivers'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Driver vendor passthru - https://docs.openstack.org/api-ref/baremetal/#driver-vendor-passthru-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2978,6 +3338,12 @@ third_party_admin_cannot_get_drivers_vendor_passthru:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers_vendor_passthru:
+ path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Node Bios - https://docs.openstack.org/api-ref/baremetal/#node-bios-nodes
owner_reader_can_get_bios_setttings:
@@ -2998,6 +3364,18 @@ third_party_admin_cannot_get_bios_settings:
headers: *third_party_admin_headers
assert_status: 404
+service_can_get_bios_setttings_owner_project:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
+service_cannot_get_bios_setttings:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Conductors - https://docs.openstack.org/api-ref/baremetal/#allocations-allocations
# This is a system scoped endpoint, everything should fail in this section.
@@ -3271,7 +3649,7 @@ third_party_admin_cannot_get_deploy_templates:
third_party_admin_cannot_post_deploy_template:
path: '/v1/deploy_templates'
method: post
- body:
+ body: &deploy_template
name: 'CUSTOM_TEST_TEMPLATE'
steps:
- interface: 'deploy'
@@ -3281,6 +3659,19 @@ third_party_admin_cannot_post_deploy_template:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_deploy_templates:
+ path: '/v1/deploy_templates'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_post_deploy_template:
+ path: '/v1/deploy_templates'
+ method: post
+ body: *deploy_template
+ headers: *service_headers
+ assert_status: 500
+
# Chassis endpoints - https://docs.openstack.org/api-ref/baremetal/#chassis-chassis
# This is a system scoped endpoint, everything should fail in this section.
@@ -3311,6 +3702,20 @@ third_party_admin_cannot_create_chassis:
description: 'test-chassis'
assert_status: 500
+service_cannot_access_chassis:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_create_chassis:
+ path: '/v1/chassis'
+ method: post
+ headers: *service_headers
+ body:
+ description: 'test-chassis'
+ assert_status: 500
+
# Node history entries
node_history_get_admin:
@@ -3337,6 +3742,20 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
+node_history_get_service_cannot_be_retrieved:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
node_history_get_entry_admin:
path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
method: get
@@ -3391,6 +3810,12 @@ lessee_node_history_get_entry_reader:
headers: *lessee_reader_headers
assert_status: 404
+owner_service_node_history_get_entry:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
third_party_admin_cannot_get_node_history:
path: '/v1/nodes/{owner_node_ident}'
method: get
@@ -3403,6 +3828,12 @@ node_history_get_entry_admin:
headers: *third_party_admin_headers
assert_status: 404
+node_history_get_entry_service:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Node inventory support
node_inventory_get_admin:
diff --git a/ironic/tests/unit/api/test_rbac_system_scoped.yaml b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
index 533356217..a980fefc7 100644
--- a/ironic/tests/unit/api/test_rbac_system_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
@@ -23,6 +23,10 @@ values:
X-Project-ID: a1111111111111111111111111111111
X-Roles: admin
X-Project-Name: 'other-project'
+ service_headers: &service_headers
+ X-Auth-Token: 'baremetal-service-token'
+ X-Roles: service
+ OpenStack-System-Scope: all
owner_project_id: &owner_project_id '{owner_project_id}'
other_project_id: &other_project_id '{other_project_id}'
node_ident: &node_ident '{node_ident}'
@@ -52,6 +56,13 @@ nodes_post_reader:
body: *node_post_body
assert_status: 403
+nodes_post_service:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+
nodes_get_node_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -92,6 +103,14 @@ nodes_get_admin:
nodes: 3
assert_status: 200
+nodes_get_service:
+ path: '/v1/nodes'
+ method: get
+ headers: *service_headers
+ assert_list_length:
+ nodes: 3
+ assert_status: 200
+
nodes_get_other_admin:
path: '/v1/nodes'
method: get
@@ -119,6 +138,12 @@ nodes_detail_get_reader:
headers: *reader_headers
assert_status: 200
+nodes_detail_get_service:
+ path: '/v1/nodes/detail'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
nodes_node_ident_get_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -187,6 +212,12 @@ nodes_node_ident_delete_admin:
headers: *admin_headers
assert_status: 503
+nodes_node_ident_delete_service:
+ path: '/v1/nodes/{node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
nodes_node_ident_delete_member:
path: '/v1/nodes/{node_ident}'
method: delete
@@ -337,7 +368,6 @@ nodes_management_inject_nmi_put_reader:
body: {}
assert_status: 403
-
nodes_states_get_admin:
path: '/v1/nodes/{node_ident}/states'
method: get
@@ -448,6 +478,13 @@ nodes_states_provision_put_reader:
body: *provision_body
assert_status: 403
+nodes_states_provision_put_service:
+ path: '/v1/nodes/{node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 503
+
nodes_states_raid_put_admin:
path: '/v1/nodes/{node_ident}/states/raid'
method: put
@@ -486,12 +523,18 @@ nodes_states_console_get_member:
headers: *scoped_member_headers
assert_status: 503
-nodes_states_console_get_admin:
+nodes_states_console_get_reader:
path: '/v1/nodes/{node_ident}/states/console'
method: get
headers: *reader_headers
assert_status: 403
+nodes_states_console_get_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_states_console_put_admin:
path: '/v1/nodes/{node_ident}/states/console'
method: put
@@ -514,6 +557,13 @@ nodes_states_console_put_reader:
body: *console_body_put
assert_status: 403
+nodes_states_console_put_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 503
+
# Node Traits - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# Calls conductor upon the get as a task is required.
@@ -729,6 +779,12 @@ nodes_vifs_get_reader:
headers: *reader_headers
assert_status: 503
+nodes_vifs_get_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_vifs_post_admin:
path: '/v1/nodes/{node_ident}/vifs'
method: post
@@ -751,6 +807,13 @@ nodes_vifs_post_reader:
assert_status: 403
body: *vif_body
+nodes_vifs_post_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ assert_status: 503
+ body: *vif_body
+
# This calls the conductor, hence not status 403.
nodes_vifs_node_vif_ident_delete_admin:
path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
@@ -770,6 +833,12 @@ nodes_vifs_node_vif_ident_delete_reader:
headers: *reader_headers
assert_status: 403
+nodes_vifs_node_vif_ident_delete_service:
+ path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
nodes_management_indicators_get_allow:
@@ -1182,6 +1251,12 @@ volume_get_reader:
headers: *reader_headers
assert_status: 200
+volume_get_service:
+ path: '/v1/volume'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# Volume connectors
volume_connectors_get_admin:
@@ -1202,6 +1277,12 @@ volume_connectors_get_reader:
headers: *reader_headers
assert_status: 200
+volume_connectors_get_service:
+ path: '/v1/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This ends up returning a 400 due to the
# UUID not already being in ironic.
volume_connectors_post_admin:
@@ -1230,6 +1311,13 @@ volume_connectors_post_reader:
assert_status: 403
body: *volume_connector_body
+volume_connectors_post_service:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 201
+ body: *volume_connector_body
+
volume_volume_connector_id_get_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -1272,6 +1360,13 @@ volume_volume_connector_id_patch_reader:
body: *connector_patch_body
assert_status: 403
+volume_volume_connector_id_patch_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: patch
+ headers: *service_headers
+ body: *connector_patch_body
+ assert_status: 503
+
volume_volume_connector_id_delete_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -1290,6 +1385,12 @@ volume_volume_connector_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_connector_id_delete_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Volume targets
volume_targets_get_admin:
@@ -1310,6 +1411,12 @@ volume_targets_get_reader:
headers: *reader_headers
assert_status: 200
+volume_targets_get_service:
+ path: '/v1/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): Because we can't seem to get the uuid
# to load from an existing uuid, since we're not substituting
# it, this will return with 400 due to the ID not matching.
@@ -1360,6 +1467,12 @@ volume_volume_target_id_get_reader:
headers: *reader_headers
assert_status: 200
+volume_volume_target_id_get_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This triggers a call to the conductor and
# thus will fail, but does not return a 403 which means success.
volume_volume_target_id_patch_admin:
@@ -1386,6 +1499,13 @@ volume_volume_target_id_patch_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_patch_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 503
+
volume_volume_target_id_delete_admin:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -1404,6 +1524,12 @@ volume_volume_target_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_delete_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
nodes_volume_get_admin:
@@ -2002,6 +2128,12 @@ chassis_get_reader:
headers: *reader_headers
assert_status: 200
+chassis_get_service:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
chassis_detail_get_admin:
path: '/v1/chassis/detail'
method: get
@@ -2080,6 +2212,12 @@ chassis_chassis_id_delete_reader:
headers: *reader_headers
assert_status: 403
+chassis_chassis_id_delete_service:
+ path: '/v1/chassis/{chassis_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
# Node history entries
node_history_get_admin:
@@ -2106,6 +2244,14 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
node_history_get_entry_admin:
path: '/v1/nodes/{node_ident}/history/{history_ident}'
method: get
@@ -2137,3 +2283,9 @@ node_inventory_get_reader:
method: get
headers: *reader_headers
assert_status: 200
+
+node_history_get_entry_service:
+ path: '/v1/nodes/{node_ident}/history/{history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index 3c636a1b1..e12bbb743 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -18,14 +18,18 @@ from unittest import mock
from oslo_utils import importutils
+from ironic.common import context as ironic_context
from ironic.common import exception
+from ironic.common import swift
from ironic.conductor import task_manager
from ironic.drivers.modules import inspect_utils as utils
+from ironic.drivers.modules import inspector
from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
sushy = importutils.try_import('sushy')
+CONF = inspector.CONF
@mock.patch('time.sleep', lambda sec: None)
@@ -88,3 +92,120 @@ class InspectFunctionTestCase(db_base.DbTestCase):
mock.call(task.context, **port_dict2)]
port_mock.assert_has_calls(expected_calls, any_order=True)
self.assertEqual(2, port_mock.return_value.create.call_count)
+
+
+class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
+ fake_inventory_data = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+
+ def setUp(self):
+ super(IntrospectionDataStorageFunctionsTestCase, self).setUp()
+ self.node = obj_utils.create_test_node(self.context)
+
+ def test_store_introspection_data_db(self):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ fake_introspection_data = {'inventory': self.fake_inventory_data,
+ **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ utils.store_introspection_data(self.node, fake_introspection_data,
+ fake_context)
+ stored = objects.NodeInventory.get_by_node_id(self.context,
+ self.node.id)
+ self.assertEqual(self.fake_inventory_data, stored["inventory_data"])
+ self.assertEqual(self.fake_plugin_data, stored["plugin_data"])
+
+ @mock.patch.object(utils, '_store_introspection_data_in_swift',
+ autospec=True)
+ def test_store_introspection_data_swift(self, mock_store_data):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ CONF.set_override(
+ 'swift_data_container', 'introspection_data',
+ group='inventory')
+ fake_introspection_data = {
+ "inventory": self.fake_inventory_data, **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ utils.store_introspection_data(self.node, fake_introspection_data,
+ fake_context)
+ mock_store_data.assert_called_once_with(
+ self.node.uuid, inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
+
+ def test_store_introspection_data_nostore(self):
+ CONF.set_override('data_backend', 'none', group='inventory')
+ fake_introspection_data = {
+ "inventory": self.fake_inventory_data, **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ ret = utils.store_introspection_data(self.node,
+ fake_introspection_data,
+ fake_context)
+ self.assertIsNone(ret)
+
+ def test__node_inventory_convert(self):
+ required_output = {"inventory": self.fake_inventory_data,
+ "plugin_data": self.fake_plugin_data}
+ input_given = {}
+ input_given["inventory_data"] = self.fake_inventory_data
+ input_given["plugin_data"] = self.fake_plugin_data
+ input_given["booom"] = "boom"
+ ret = utils._node_inventory_convert(input_given)
+ self.assertEqual(required_output, ret)
+
+ @mock.patch.object(utils, '_node_inventory_convert', autospec=True)
+ @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
+ def test_get_introspection_data_db(self, mock_inventory, mock_convert):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ fake_introspection_data = {'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ mock_inventory.get_by_node_id.return_value = fake_introspection_data
+ utils.get_introspection_data(self.node, fake_context)
+ mock_convert.assert_called_once_with(fake_introspection_data)
+
+ @mock.patch.object(utils, '_get_introspection_data_from_swift',
+ autospec=True)
+ def test_get_introspection_data_swift(self, mock_get_data):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ CONF.set_override(
+ 'swift_data_container', 'introspection_data',
+ group='inventory')
+ fake_context = ironic_context.RequestContext()
+ utils.get_introspection_data(self.node, fake_context)
+ mock_get_data.assert_called_once_with(
+ self.node.uuid)
+
+ def test_get_introspection_data_nostore(self):
+ CONF.set_override('data_backend', 'none', group='inventory')
+ fake_context = ironic_context.RequestContext()
+ self.assertRaises(
+ exception.NotFound, utils.get_introspection_data,
+ self.node, fake_context)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test__store_introspection_data_in_swift(self, swift_api_mock):
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ utils._store_introspection_data_in_swift(
+ self.node.uuid, self.fake_inventory_data, self.fake_plugin_data)
+ swift_obj_mock = swift_api_mock.return_value
+ object_name = 'inspector_data-' + str(self.node.uuid)
+ swift_obj_mock.create_object_from_data.assert_has_calls([
+ mock.call(object_name + '-inventory', self.fake_inventory_data,
+ container),
+ mock.call(object_name + '-plugin', self.fake_plugin_data,
+ container)])
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test__get_introspection_data_from_swift(self, swift_api_mock):
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ swift_obj_mock.get_object.side_effect = [
+ self.fake_inventory_data,
+ self.fake_plugin_data
+ ]
+ ret = utils._get_introspection_data_from_swift(self.node.uuid)
+ req_ret = {"inventory": self.fake_inventory_data,
+ "plugin_data": self.fake_plugin_data}
+ self.assertEqual(req_ret, ret)
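The new tests above exercise the ``[inventory]`` options end to end. As a rough usage sketch (illustrative only, not part of this change), a caller holding a node object and a request context could drive the same helpers directly; the option names, backend values and call signatures are taken from the tests, everything else is assumed:

from ironic.common import context as ironic_context
from ironic.common import exception
from ironic.conf import CONF
from ironic.drivers.modules import inspect_utils


def round_trip(node, introspection_data):
    # Pick one of the backends exercised above: 'database', 'swift' or 'none'.
    CONF.set_override('data_backend', 'database', group='inventory')
    ctx = ironic_context.RequestContext()
    # The 'inventory' key is stored as inventory_data; every other key ends
    # up in plugin_data. With the 'none' backend nothing is stored at all.
    inspect_utils.store_introspection_data(node, introspection_data, ctx)
    try:
        # Returns {'inventory': ..., 'plugin_data': ...}; raises NotFound
        # when the configured backend is 'none'.
        return inspect_utils.get_introspection_data(node, ctx)
    except exception.NotFound:
        return None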
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py
index 00de10189..75ccc3ebf 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/test_inspector.py
@@ -19,17 +19,14 @@ import openstack
from ironic.common import context
from ironic.common import exception
from ironic.common import states
-from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import inspect_utils
from ironic.drivers.modules import inspector
from ironic.drivers.modules.redfish import utils as redfish_utils
-from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
-
CONF = inspector.CONF
@@ -554,55 +551,26 @@ class CheckStatusTestCase(BaseTestCase):
self.task)
self.driver.boot.clean_up_ramdisk.assert_called_once_with(self.task)
- def test_status_ok_store_inventory_in_db(self, mock_client):
- CONF.set_override('inventory_data_backend', 'database',
- group='inspector')
+ @mock.patch.object(inspect_utils, 'store_introspection_data',
+ autospec=True)
+ def test_status_ok_store_inventory(self, mock_store_data, mock_client):
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error=None,
spec=['is_finished', 'error'])
- mock_get_data = mock_client.return_value.get_introspection_data
- mock_get_data.return_value = {
+ fake_introspection_data = {
"inventory": {"cpu": "amd"}, "disks": [{"name": "/dev/vda"}]}
- inspector._check_status(self.task)
- mock_get.assert_called_once_with(self.node.uuid)
- mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
-
- stored = objects.NodeInventory.get_by_node_id(self.context,
- self.node.id)
- self.assertEqual({"cpu": "amd"}, stored["inventory_data"])
- self.assertEqual({"disks": [{"name": "/dev/vda"}]},
- stored["plugin_data"])
-
- @mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test_status_ok_store_inventory_in_swift(self,
- swift_api_mock, mock_client):
- CONF.set_override('inventory_data_backend', 'swift', group='inspector')
- CONF.set_override(
- 'swift_inventory_data_container', 'introspection_data',
- group='inspector')
- mock_get = mock_client.return_value.get_introspection
- mock_get.return_value = mock.Mock(is_finished=True,
- error=None,
- spec=['is_finished', 'error'])
mock_get_data = mock_client.return_value.get_introspection_data
- fake_inventory_data = {"cpu": "amd"}
- fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
- mock_get_data.return_value = {
- "inventory": fake_inventory_data, **fake_plugin_data}
- swift_obj_mock = swift_api_mock.return_value
- object_name = 'inspector_data-' + str(self.node.uuid)
+ mock_get_data.return_value = fake_introspection_data
inspector._check_status(self.task)
mock_get.assert_called_once_with(self.node.uuid)
mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
- container = 'introspection_data'
- swift_obj_mock.create_object_from_data.assert_has_calls([
- mock.call(object_name + '-inventory', fake_inventory_data,
- container),
- mock.call(object_name + '-plugin', fake_plugin_data, container)])
+ mock_store_data.assert_called_once_with(self.node,
+ fake_introspection_data,
+ self.task.context)
def test_status_ok_store_inventory_nostore(self, mock_client):
- CONF.set_override('inventory_data_backend', 'none', group='inspector')
+ CONF.set_override('data_backend', 'none', group='inventory')
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error=None,
@@ -613,8 +581,8 @@ class CheckStatusTestCase(BaseTestCase):
mock_get_data.assert_not_called()
def test_status_error_dont_store_inventory(self, mock_client):
- CONF.set_override('inventory_data_backend', 'database',
- group='inspector')
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error='boom',
diff --git a/ironic/tests/unit/drivers/modules/test_snmp.py b/ironic/tests/unit/drivers/modules/test_snmp.py
index 00799dc4d..5391d7ac5 100644
--- a/ironic/tests/unit/drivers/modules/test_snmp.py
+++ b/ironic/tests/unit/drivers/modules/test_snmp.py
@@ -761,7 +761,7 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
driver = snmp._get_driver(self.node)
mock_client.get.return_value = driver.value_power_on
pstate = driver.power_on()
- mock_sleep.assert_called_once_with(1)
+ self.assertTrue(mock_sleep.called)
mock_client.set.assert_called_once_with(driver._snmp_oid(),
driver.value_power_on)
mock_client.get.assert_called_once_with(driver._snmp_oid())
@@ -775,7 +775,7 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
driver = snmp._get_driver(self.node)
mock_client.get.return_value = driver.value_power_off
pstate = driver.power_off()
- mock_sleep.assert_called_once_with(1)
+ self.assertTrue(mock_sleep.called)
mock_client.set.assert_called_once_with(driver._snmp_oid(),
driver.value_power_off)
mock_client.get.assert_called_once_with(driver._snmp_oid())
diff --git a/releasenotes/config.yaml b/releasenotes/config.yaml
new file mode 100644
index 000000000..26538010e
--- /dev/null
+++ b/releasenotes/config.yaml
@@ -0,0 +1,5 @@
+---
+# Ignore the kilo-eol tag because that branch does not work with reno
+# and contains no release notes.
+# Ignore bugfix tags because their releasenotes are covered under stable
+closed_branch_tag_re: "(?!^(kilo-|bugfix-)).+-eol$"
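A quick, illustrative sanity check of the tag filter above (assuming the expression is anchored at the start of the tag name, as ``re.match`` does; the ``bugfix-20.1-eol`` tag name is made up for the example):

import re

closed_branch_tag_re = re.compile(r'(?!^(kilo-|bugfix-)).+-eol$')

assert closed_branch_tag_re.match('ocata-eol')            # regular EOL tags match
assert not closed_branch_tag_re.match('kilo-eol')         # kilo branch is ignored
assert not closed_branch_tag_re.match('bugfix-20.1-eol')  # bugfix tags are ignored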
diff --git a/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml b/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml
new file mode 100644
index 000000000..7a2aa7b95
--- /dev/null
+++ b/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+    Adds support for the ``service`` role, which is intended for
+    service-to-service communication, for example when ``ironic-inspector``,
+    ``nova-compute``, or ``networking-baremetal`` needs to communicate with
+    Ironic's API.
+upgrade:
+ - |
+    Ironic now supports the ``service`` role, which is available in the
+    ``system`` scope as well as the ``project`` scope. This functionality is
+    intended for service-to-service communication, if desired. The effective
+    access rights are similar to the ``manager`` or ``owner`` scoped admin
+    privileges.
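As a hedged sketch of what this enables (the endpoint, user and project names below are placeholders, not part of the change): a service user that has been granted the ``service`` role can talk to the Bare Metal API with openstacksdk like any other client, and the new policy rules decide what it may do.

import openstack

# Placeholder credentials; in a real deployment the existing nova-compute or
# ironic-inspector service user would simply be granted the 'service' role.
conn = openstack.connect(
    auth_url='https://keystone.example.com/v3',
    username='nova',
    password='secret',
    project_name='service',
    user_domain_name='Default',
    project_domain_name='Default',
)

for node in conn.baremetal.nodes():
    print(node.provision_state)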
diff --git a/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml b/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml
new file mode 100644
index 000000000..ddb6c86cb
--- /dev/null
+++ b/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+    Fixes the default value of the ``[DEFAULT]grub_config_path`` option so
+    that it points to the default path for UEFI bootloader configuration,
+    whereas the previous default was the BIOS grub2 configuration path.
+upgrade:
+ - |
+ The default configuration value for ``[DEFAULT]grub_config_path`` has
+ been changed from ``/boot/grub/grub.conf`` to ``EFI/BOOT/grub.efi`` as
+ the configuration parameter was for UEFI boot configuration, and the
+ ``/boot/grub/grub2.conf`` path is for BIOS booting. This was verified
+ by referencing several working UEFI virtual media examples where this
+ value was overridden to the new configuration value.
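Operators who still depend on the previous BIOS-oriented behaviour can pin the option back explicitly. The snippet below is only an illustration using an oslo.config override with the old default quoted in the note above; in practice this would simply be set under ``[DEFAULT]`` in ironic.conf.

from ironic.conf import CONF

# Restore the pre-change default described above; equivalent to setting
# grub_config_path = /boot/grub/grub.conf in the [DEFAULT] section.
CONF.set_override('grub_config_path', '/boot/grub/grub.conf')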
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index af7fb4e03..6878c43c6 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -3,15 +3,16 @@
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Ironic Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-11-04 18:03+0000\n"
+"POT-Creation-Date: 2023-02-01 23:20+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-11-04 10:34+0000\n"
+"PO-Revision-Date: 2023-02-03 04:37+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -136,9 +137,6 @@ msgstr "10.1.8"
msgid "10.1.9"
msgstr "10.1.9"
-msgid "11.0.0"
-msgstr "11.0.0"
-
msgid "11.1.0"
msgstr "11.1.0"
@@ -157,9 +155,6 @@ msgstr "11.1.4"
msgid "11.1.4-12"
msgstr "11.1.4-12"
-msgid "12.0.0"
-msgstr "12.0.0"
-
msgid "12.1.0"
msgstr "12.1.0"
@@ -184,9 +179,6 @@ msgstr "12.1.6"
msgid "12.1.6-3"
msgstr "12.1.6-3"
-msgid "12.2.0"
-msgstr "12.2.0"
-
msgid "13.0.0"
msgstr "13.0.0"
@@ -211,8 +203,8 @@ msgstr "13.0.6"
msgid "13.0.7"
msgstr "13.0.7"
-msgid "13.0.7-28"
-msgstr "13.0.7-28"
+msgid "13.0.7-29"
+msgstr "13.0.7-29"
msgid "14.0.0"
msgstr "14.0.0"
@@ -226,8 +218,8 @@ msgstr "15.0.1"
msgid "15.0.2"
msgstr "15.0.2"
-msgid "15.0.2-22"
-msgstr "15.0.2-22"
+msgid "15.0.2-25"
+msgstr "15.0.2-25"
msgid "15.1.0"
msgstr "15.1.0"
@@ -253,8 +245,8 @@ msgstr "16.0.4"
msgid "16.0.5"
msgstr "16.0.5"
-msgid "16.0.5-9"
-msgstr "16.0.5-9"
+msgid "16.0.5-11"
+msgstr "16.0.5-11"
msgid "16.1.0"
msgstr "16.1.0"
@@ -277,6 +269,9 @@ msgstr "17.0.4"
msgid "17.1.0"
msgstr "17.1.0"
+msgid "17.1.0-6"
+msgstr "17.1.0-6"
+
msgid "18.0.0"
msgstr "18.0.0"
@@ -289,8 +284,11 @@ msgstr "18.2.0"
msgid "18.2.1"
msgstr "18.2.1"
-msgid "18.2.1-31"
-msgstr "18.2.1-31"
+msgid "18.2.2"
+msgstr "18.2.2"
+
+msgid "18.2.2-6"
+msgstr "18.2.2-6"
msgid "19.0.0"
msgstr "19.0.0"
@@ -301,8 +299,11 @@ msgstr "20.0.0"
msgid "20.1.0"
msgstr "20.1.0"
-msgid "20.1.0-29"
-msgstr "20.1.0-29"
+msgid "20.1.1"
+msgstr "20.1.1"
+
+msgid "20.1.1-6"
+msgstr "20.1.1-6"
msgid "20.2.0"
msgstr "20.2.0"
@@ -313,8 +314,17 @@ msgstr "21.0.0"
msgid "21.1.0"
msgstr "21.1.0"
-msgid "21.1.0-25"
-msgstr "21.1.0-25"
+msgid "21.1.0-6"
+msgstr "21.1.0-6"
+
+msgid "21.2.0"
+msgstr "21.2.0"
+
+msgid "21.3.0"
+msgstr "21.3.0"
+
+msgid "21.3.0-4"
+msgstr "21.3.0-4"
msgid "4.0.0 First semver release"
msgstr "4.0.0 First semver release"
@@ -322,6 +332,12 @@ msgstr "4.0.0 First semver release"
msgid "4.1.0"
msgstr "4.1.0"
+msgid "4.2.0"
+msgstr "4.2.0"
+
+msgid "4.2.1"
+msgstr "4.2.1"
+
msgid "4.2.2"
msgstr "4.2.2"
@@ -551,21 +567,6 @@ msgstr "A few major changes since 9.1.x (Pike) are worth mentioning:"
msgid ""
"A future release will change the default value of ``[deploy]/"
-"default_boot_mode`` from \"bios\" to \"uefi\". It is recommended to set an "
-"explicit value for this option. For hardware types which don't support "
-"setting boot mode, a future release will assume boot mode is set to UEFI if "
-"no boot mode is set to node's capabilities. It is also recommended to set "
-"``boot_mode`` into ``properties/capabilities`` of a node."
-msgstr ""
-"A future release will change the default value of ``[deploy]/"
-"default_boot_mode`` from \"bios\" to \"uefi\". It is recommended to set an "
-"explicit value for this option. For hardware types which don't support "
-"setting boot mode, a future release will assume boot mode is set to UEFI if "
-"no boot mode is set to node's capabilities. It is also recommended to set "
-"``boot_mode`` into ``properties/capabilities`` of a node."
-
-msgid ""
-"A future release will change the default value of ``[deploy]/"
"default_boot_option`` from \"netboot\" to \"local\". To avoid disruptions, "
"it is recommended to set an explicit value for this option."
msgstr ""
@@ -867,10 +868,6 @@ msgstr ""
"Driver needs this verification because the machine is going to use a MAC "
"that will only be specified at the profile application."
-msgid "A warning is logged for any changes to immutable configuration options."
-msgstr ""
-"A warning is logged for any changes to immutable configuration options."
-
msgid "API fields to support node ``description`` and ``owner`` values."
msgstr "API fields to support node ``description`` and ``owner`` values."
@@ -882,20 +879,6 @@ msgstr ""
"net/ironic/+bug/1536828 for details."
msgid ""
-"API version 1.57 adds a REST API endpoint for updating an existing "
-"allocation. Only ``name`` and ``extra`` fields are allowed to be updated."
-msgstr ""
-"API version 1.57 adds a REST API endpoint for updating an existing "
-"allocation. Only ``name`` and ``extra`` fields are allowed to be updated."
-
-msgid ""
-"API version 1.58 allows backfilling allocations for existing deployed nodes "
-"by providing ``node`` to ``POST /v1/allocations``."
-msgstr ""
-"API version 1.58 allows backfilling allocations for existing deployed nodes "
-"by providing ``node`` to ``POST /v1/allocations``."
-
-msgid ""
"Ability to create an allocation has been restricted by a new policy rule "
"``baremetal::allocation::create_pre_rbac`` which prevents creation of "
"allocations by any project administrator when operating with the new Role "
@@ -941,13 +924,15 @@ msgid "Add Wake-On-Lan Power Driver"
msgstr "Add Wake-On-LAN Power Driver"
msgid ""
-"Add ``?detail=`` boolean query to the API list endpoints to provide a more "
-"RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. "
-"The default is False. Now these API requests are possible:"
+"Add ``anaconda`` deploy interface to Ironic. This driver will deploy the OS "
+"using anaconda installer and kickstart file instead of IPA. To support this "
+"feature a new configuration group ``anaconda`` is added to Ironic "
+"configuration file along with ``default_ks_template`` configuration option."
msgstr ""
-"Add ``?detail=`` boolean query to the API list endpoints to provide a more "
-"RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. "
-"The default is False. Now these API requests are possible:"
+"Add ``anaconda`` deploy interface to Ironic. This driver will deploy the OS "
+"using Anaconda installer and kickstart file instead of IPA. To support this "
+"feature a new configuration group ``anaconda`` is added to Ironic "
+"configuration file along with ``default_ks_template`` configuration option."
msgid ""
"Add ``choices`` parameter to config options. Invalid values will be rejected "
@@ -1101,6 +1086,17 @@ msgstr ""
"images if ``image_type`` is set to ``partition`` and local boot is used."
msgid ""
+"Adding new clean steps to ``ilo`` and ``ilo5`` hardware type - "
+"``security_parameters_update``, ``update_minimum_password_length``, and "
+"``update_auth_failure_logging_threshold`` which allows users to modify ilo "
+"system security settings."
+msgstr ""
+"Adding new clean steps to ``ilo`` and ``ilo5`` hardware type - "
+"``security_parameters_update``, ``update_minimum_password_length``, and "
+"``update_auth_failure_logging_threshold`` which allows users to modify ilo "
+"system security settings."
+
+msgid ""
"Addition of the provision state target verb of ``adopt`` which allows an "
"operator to move a node into an ``active`` state from ``manageable`` state, "
"without performing a deployment operation on the node. This can be used to "
@@ -1211,15 +1207,6 @@ msgstr ""
"udp_transport_timeout`` allow to change the number of retries and the "
"timeout values respectively for the the SNMP driver."
-msgid ""
-"Adds SNMPv3 message authentication and encryption features to ironic "
-"``snmp`` hardware type. To enable these features, the following parameters "
-"should be used in the node's ``driver_info``:"
-msgstr ""
-"Adds SNMPv3 message authentication and encryption features to ironic "
-"``snmp`` hardware type. To enable these features, the following parameters "
-"should be used in the node's ``driver_info``:"
-
msgid "Adds ShellinaboxConsole support for virsh SSH driver."
msgstr "Adds ShellinaboxConsole support for virsh SSH driver."
@@ -1288,9 +1275,6 @@ msgstr "Adds ``bios_interface`` to the node list and node show api-ref."
msgid "Adds ``bios_interface`` to the node validate api-ref."
msgstr "Adds ``bios_interface`` to the node validate api-ref."
-msgid "Adds ``bios`` interface to the ``redfish`` hardware type."
-msgstr "Adds ``bios`` interface to the ``redfish`` hardware type."
-
msgid ""
"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
"to IPA, so when connection errors occur the command will be executed again."
@@ -1299,15 +1283,6 @@ msgstr ""
"to IPA, so when connection errors occur the command will be executed again."
msgid ""
-"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
-"to IPA, so when connection errors occur the command will be executed again. "
-"The options are located in the ``[agent]`` section."
-msgstr ""
-"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
-"to IPA, so when connection errors occur the command will be executed again. "
-"The options are located in the ``[agent]`` section."
-
-msgid ""
"Adds ``driver_internal_info`` field to the node-related notification "
"``baremetal.node.provision_set.*``, new payload version 1.16."
msgstr ""
@@ -1315,28 +1290,6 @@ msgstr ""
"``baremetal.node.provision_set.*``, new payload version 1.16."
msgid ""
-"Adds ``external`` storage interface which is short for \"externally managed"
-"\". This adds logic to allow the Bare Metal service to identify when a BFV "
-"scenario is being requested based upon the configuration set for ``volume "
-"targets``."
-msgstr ""
-"Adds ``external`` storage interface which is short for \"externally managed"
-"\". This adds logic to allow the Bare Metal service to identify when a BFV "
-"scenario is being requested based upon the configuration set for ``volume "
-"targets``."
-
-msgid ""
-"Adds ``get_boot_mode``, ``set_boot_mode`` and ``get_supported_boot_modes`` "
-"methods to driver management interface. Drivers can override these methods "
-"implementing boot mode management calls to the BMC of the baremetal nodes "
-"being managed."
-msgstr ""
-"Adds ``get_boot_mode``, ``set_boot_mode`` and ``get_supported_boot_modes`` "
-"methods to driver management interface. Drivers can override these methods "
-"implementing boot mode management calls to the BMC of the baremetal nodes "
-"being managed."
-
-msgid ""
"Adds ``idrac`` hardware type support of a virtual media boot interface "
"implementation that utilizes the Redfish out-of-band (OOB) management "
"protocol and is compatible with the integrated Dell Remote Access Controller "
@@ -1415,17 +1368,6 @@ msgid ""
msgstr ""
"Adds ``rescue_interface`` field to the following node-related notifications:"
-msgid ""
-"Adds ``reset_idrac`` and ``known_good_state`` cleaning steps to hardware "
-"type ``idrac``. ``reset_idrac`` actually resets the iDRAC; "
-"``known_good_state`` also resets the iDRAC and clears the Lifecycle "
-"Controller job queue to make sure the iDRAC is in good state."
-msgstr ""
-"Adds ``reset_idrac`` and ``known_good_state`` cleaning steps to hardware "
-"type ``idrac``. ``reset_idrac`` actually resets the iDRAC; "
-"``known_good_state`` also resets the iDRAC and clears the Lifecycle "
-"Controller job queue to make sure the iDRAC is in good state."
-
msgid "Adds ``storage_interface`` field to the node-related notifications:"
msgstr "Adds ``storage_interface`` field to the node-related notifications:"
@@ -1485,27 +1427,6 @@ msgstr ""
"notifications."
msgid ""
-"Adds a ``[conductor]send_sensor_data_for_undeployed_nodes`` option to enable "
-"ironic to collect and transmit sensor data for all nodes for which sensor "
-"data collection is available. By default, this option is not enabled which "
-"aligns with the prior behavior of sensor data collection and transmission "
-"where such data was only collected if an ``instance_uuid`` was present to "
-"signify that the node has been or is being deployed. With this option set to "
-"``True``, operators may be able to identify hardware in a faulty state "
-"through the sensor data and take action before an instance workload is "
-"deployed."
-msgstr ""
-"Adds a ``[conductor]send_sensor_data_for_undeployed_nodes`` option to enable "
-"ironic to collect and transmit sensor data for all nodes for which sensor "
-"data collection is available. By default, this option is not enabled which "
-"aligns with the prior behaviour of sensor data collection and transmission "
-"where such data was only collected if an ``instance_uuid`` was present to "
-"signify that the node has been or is being deployed. With this option set to "
-"``True``, operators may be able to identify hardware in a faulty state "
-"through the sensor data and take action before an instance workload is "
-"deployed."
-
-msgid ""
"Adds a ``clear_job_queue`` cleaning step to the ``idrac-wsman`` management "
"interface. The ``clear_job_queue`` cleaning step clears the Lifecycle "
"Controller job queue including any pending jobs."
@@ -1639,38 +1560,6 @@ msgstr ""
"return tracebacks in API responses in an error condition."
msgid ""
-"Adds a configuration option ``[deploy]disk_erasure_concurrency`` to define "
-"the target pool size used by Ironic Python Agent ramdisk to erase disk "
-"devices. The number of threads created by IPA to erase disk devices is the "
-"minimum value of target pool size and the number of disks to be erased. This "
-"feature can greatly reduce the operation time for baremetals with multiple "
-"disks. For the backwards compatibility, the default value is 1."
-msgstr ""
-"Adds a configuration option ``[deploy]disk_erasure_concurrency`` to define "
-"the target pool size used by Ironic Python Agent ramdisk to erase disk "
-"devices. The number of threads created by IPA to erase disk devices is the "
-"minimum value of target pool size and the number of disks to be erased. This "
-"feature can greatly reduce the operation time for baremetals with multiple "
-"disks. For the backwards compatibility, the default value is 1."
-
-msgid ""
-"Adds a configuration option ``[ipmi]disable_boot_timeout`` which is used to "
-"set the default behavior whether ironic should send a raw IPMI command to "
-"disable timeout. This configuration option can be overidden by the per-node "
-"option ``ipmi_disable_boot_timeout`` in node's ``driver_info`` field. See "
-"`story 2004266 <https://storyboard.openstack.org/#!/story/2004266>`_ and "
-"`Story 2002977 <https://storyboard.openstack.org/#!/story/2002977>`_ for "
-"additional information."
-msgstr ""
-"Adds a configuration option ``[ipmi]disable_boot_timeout`` which is used to "
-"set the default behaviour whether ironic should send a raw IPMI command to "
-"disable timeout. This configuration option can be overridden by the per-node "
-"option ``ipmi_disable_boot_timeout`` in node's ``driver_info`` field. See "
-"`story 2004266 <https://storyboard.openstack.org/#!/story/2004266>`_ and "
-"`Story 2002977 <https://storyboard.openstack.org/#!/story/2002977>`_ for "
-"additional information."
-
-msgid ""
"Adds a configuration option ``webserver_verify_ca`` to support custom "
"certificates to validate URLs hosted on a HTTPS webserver."
msgstr ""
@@ -1770,30 +1659,6 @@ msgstr ""
"allocated from the configured port range for further use."
msgid ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. "
-"Previously, no retries were done which caused failures. This addresses `bug "
-"1756760 <https://storyboard.openstack.org/#!/story/1756760>`_."
-msgstr ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. "
-"Previously, no retries were done which caused failures. This addresses `bug "
-"1756760 <https://storyboard.openstack.org/#!/story/1756760>`_."
-
-msgid ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. Set it "
-"to 1 if you want the previous behavior, where no retries were done."
-msgstr ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. Set it "
-"to 1 if you want the previous behaviour, where no retries were done."
-
-msgid ""
"Adds a new configuration option ``[drac]boot_device_job_status_timeout`` "
"that specifies the maximum amount of time (in seconds) to wait for the boot "
"device configuration job to transition to the scheduled state to allow a "
@@ -1890,45 +1755,6 @@ msgstr ""
"v1/drivers/<name>."
msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Caution should be taken due to the timeout monitoring is "
-"shifted from ``inspecting`` to ``inspect wait``, please stop all running "
-"asynchronous hardware inspection or wait until it is finished before "
-"upgrading to the Rocky release. Otherwise nodes in asynchronous inspection "
-"will be left at ``inspecting`` state forever unless the database is manually "
-"updated."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Caution should be taken due to the timeout monitoring is "
-"shifted from ``inspecting`` to ``inspect wait``, please stop all running "
-"asynchronous hardware inspection or wait until it is finished before "
-"upgrading to the Rocky release. Otherwise nodes in asynchronous inspection "
-"will be left at ``inspecting`` state forever unless the database is manually "
-"updated."
-
-msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Returning ``INSPECTING`` from the ``inspect_hardware`` method "
-"of inspect interface is deprecated, ``INSPECTWAIT`` should be returned "
-"instead."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Returning ``INSPECTING`` from the ``inspect_hardware`` method "
-"of inspect interface is deprecated, ``INSPECTWAIT`` should be returned "
-"instead."
-
-msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. The ``[conductor]inspect_timeout`` configuration option is "
-"deprecated for removal, please use ``[conductor]inspect_wait_timeout`` "
-"instead to specify the timeout of inspection process."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. The ``[conductor]inspect_timeout`` configuration option is "
-"deprecated for removal, please use ``[conductor]inspect_wait_timeout`` "
-"instead to specify the timeout of inspection process."
-
-msgid ""
"Adds an `agent_iboot` driver to allow use of the Iboot power driver with the "
"Agent deploy driver."
msgstr ""
@@ -2001,6 +1827,9 @@ msgstr "Current Series Release Notes"
msgid "Deprecated the 'parallel' option to periodic task decorator"
msgstr "Deprecated the 'parallel' option to periodic task decorator"
+msgid "Deprecated the bash ramdisk"
+msgstr "Deprecated the bash ramdisk"
+
msgid ""
"Drivers may optionally add a new BootInterface. This is merely a refactoring "
"of the Driver API to support future improvements."
@@ -2008,15 +1837,42 @@ msgstr ""
"Drivers may optionally add a new BootInterface. This is merely a refactoring "
"of the Driver API to support future improvements."
+msgid ""
+"Drivers using the \"agent\" deploy mechanism do not support \"rebuild --"
+"preserve-ephemeral\""
+msgstr ""
+"Drivers using the \"agent\" deploy mechanism do not support \"rebuild --"
+"preserve-ephemeral\""
+
+msgid ""
+"Fix a couple of locale issues with deployments, when running on a system "
+"using the Japanese locale"
+msgstr ""
+"Fix a couple of locale issues with deployments, when running on a system "
+"using the Japanese locale"
+
+msgid ""
+"IPMI Passwords are now obfuscated in REST API responses. This may be "
+"disabled by changing API policy settings."
+msgstr ""
+"IPMI Passwords are now obfuscated in REST API responses. This may be "
+"disabled by changing API policy settings."
+
msgid "Implemented a new Boot interface for drivers"
msgstr "Implemented a new Boot interface for drivers"
+msgid "Import Japanese translations - our first major translation addition!"
+msgstr "Import Japanese translations - our first major translation addition!"
+
msgid "Introduce new BootInterface to the Driver API"
msgstr "Introduce new BootInterface to the Driver API"
msgid "Known issues"
msgstr "Known issues"
+msgid "Liberty Series (4.0.0 - 4.2.5) Release Notes"
+msgstr "Liberty Series (4.0.0 - 4.2.5) Release Notes"
+
msgid "Migrations from Nova \"baremetal\" have been removed"
msgstr "Migrations from Nova \"baremetal\" have been removed"
@@ -2091,6 +1947,13 @@ msgid "Support for the new ENROLL workflow during Node creation"
msgstr "Support for the new ENROLL workflow during Node creation"
msgid ""
+"The \"agent\" class of drivers now support both whole-disk and partition "
+"based images."
+msgstr ""
+"The \"agent\" class of drivers now support both whole-disk and partition "
+"based images."
+
+msgid ""
"The Ironic API now has support for CORS requests, that may be used by, for "
"example, web browser-based clients. This is configured in the [cors] section "
"of ironic.conf."
@@ -2161,6 +2024,24 @@ msgstr ""
"2015.1. Full release details are available on Launchpad: https://launchpad."
"net/ironic/liberty/4.0.0."
+msgid ""
+"This release is a patch release on top of 4.2.0, as part of the stable "
+"Liberty series. Full details are available on Launchpad: https://launchpad."
+"net/ironic/liberty/4.2.1."
+msgstr ""
+"This release is a patch release on top of 4.2.0, as part of the stable "
+"Liberty series. Full details are available on Launchpad: https://launchpad."
+"net/ironic/liberty/4.2.1."
+
+msgid ""
+"This release is proposed as the stable Liberty release for Ironic, and "
+"brings with it some bug fixes and small features. Full release details are "
+"available on Launchpad: https://launchpad.net/ironic/liberty/4.2.0."
+msgstr ""
+"This release is proposed as the stable Liberty release for Ironic, and "
+"brings with it some bug fixes and small features. Full release details are "
+"available on Launchpad: https://launchpad.net/ironic/liberty/4.2.0."
+
msgid "Train Series (12.2.0 - 13.0.x) Release Notes"
msgstr "Train Series (12.2.0 - 13.0.x) Release Notes"
@@ -2173,6 +2054,15 @@ msgstr "Victoria Series (15.1.0 - 16.0.x) Release Notes"
msgid "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
msgstr "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
+msgid ""
+"While Ironic does include a ClusteredComputeManager, which allows running "
+"more than one nova-compute process with Ironic, it should be considered "
+"experimental and has many known problems."
+msgstr ""
+"While Ironic does include a ClusteredComputeManager, which allows running "
+"more than one nova-compute process with Ironic, it should be considered "
+"experimental and has many known problems."
+
msgid "Xena Series (18.0.0 - 18.2.x) Release Notes"
msgstr "Xena Series (18.0.0 - 18.2.x) Release Notes"
diff --git a/reno.yaml b/reno.yaml
deleted file mode 100644
index dd0aac790..000000000
--- a/reno.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# Ignore the kilo-eol tag because that branch does not work with reno
-# and contains no release notes.
-closed_branch_tag_re: "(.+)(?<!kilo)-eol"
diff --git a/requirements.txt b/requirements.txt
index 8a57727ec..0c73e632e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -39,7 +39,7 @@ rfc3986>=1.2.0 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
Jinja2>=3.0.0 # BSD License (3 clause)
keystonemiddleware>=9.5.0 # Apache-2.0
-oslo.messaging>=5.29.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
tenacity>=6.2.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
jsonschema>=3.2.0 # MIT
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index c59c54e86..ca1757417 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -250,9 +250,9 @@
# a small root partition, so use /opt which is mounted from a bigger
# ephemeral partition on such nodes
LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
- IRONIC_ANACONDA_IMAGE_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
- IRONIC_ANACONDA_KERNEL_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/vmlinuz
- IRONIC_ANACONDA_RAMDISK_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/initrd.img
+ IRONIC_ANACONDA_IMAGE_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
+ IRONIC_ANACONDA_KERNEL_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/vmlinuz
+ IRONIC_ANACONDA_RAMDISK_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/initrd.img
IRONIC_ANACONDA_INSECURE_HEARTBEAT: True
IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT: 3600
IRONIC_PXE_BOOT_RETRY_TIMEOUT: 3600