summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--devstack/lib/ironic21
-rw-r--r--devstack/plugin.sh2
-rwxr-xr-xdevstack/upgrade/upgrade.sh2
-rw-r--r--doc/source/admin/anaconda-deploy-interface.rst40
-rw-r--r--doc/source/admin/troubleshooting.rst38
-rw-r--r--ironic/api/controllers/v1/port.py8
-rw-r--r--ironic/common/release_mappings.py2
-rw-r--r--ironic/common/swift.py25
-rw-r--r--ironic/conf/inspector.py11
-rw-r--r--ironic/db/sqlalchemy/api.py4
-rw-r--r--ironic/db/sqlalchemy/models.py10
-rw-r--r--ironic/drivers/modules/inspector.py46
-rw-r--r--ironic/drivers/modules/redfish/raid.py8
-rw-r--r--ironic/objects/port.py8
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_port.py29
-rw-r--r--ironic/tests/unit/db/utils.py2
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_raid.py4
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspector.py46
-rw-r--r--ironic/tests/unit/objects/test_objects.py2
-rw-r--r--ironic/tests/unit/objects/test_port.py12
-rw-r--r--releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml7
-rw-r--r--zuul.d/ironic-jobs.yaml3
22 files changed, 266 insertions, 64 deletions
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index aae8ca5cf..17ba547f1 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -586,7 +586,7 @@ TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-}
TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}
# TODO(TheJulia): This PHYSICAL_NETWORK needs to be refactored in
-# our devstack plugin. It is used by the neutron-legacy integration,
+# our devstack plugin. It is used by the neutron integration,
# however they want to name the new variable for the current neutron
# plugin NEUTRON_PHYSICAL_NETWORK. For now we'll do some magic and
# change it later once we migrate our jobs.
@@ -594,7 +594,7 @@ TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}
PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-${PHYSICAL_NETWORK:-}}
# Ramdisk ISO image for Ramdisk Virtual Media/iPXE testing
-IRONIC_RAMDISK_IMAGE=${IRONIC_RAMDISK_IMAGE:-http://tinycorelinux.net/10.x/x86/archive/10.0/Core-10.0.iso}
+IRONIC_RAMDISK_IMAGE=${IRONIC_RAMDISK_IMAGE:-http://tinycorelinux.net/13.x/x86/archive/13.0/Core-13.0.iso}
IRONIC_LOADER_PATHS=${IRONIC_LOADER_PATHS:-}
@@ -2376,6 +2376,9 @@ function enroll_nodes {
local ironic_node_disk=$IRONIC_VM_SPECS_DISK
local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK
local ironic_node_arch=x86_64
+ if [[ ! -f $IRONIC_VM_MACS_CSV_FILE ]]; then
+ touch $IRONIC_VM_MACS_CSV_FILE
+ fi
local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE
if is_deployed_by_ipmi; then
@@ -2943,8 +2946,16 @@ function upload_baremetal_ironic_efiboot {
sudo mkdir -p $efiboot_mount/efi/boot
- sudo cp "$IRONIC_GRUB2_SHIM_FILE" $efiboot_mount/efi/boot/bootx64.efi
- sudo cp "$IRONIC_GRUB2_FILE" $efiboot_mount/efi/boot/grubx64.efi
+ if [[ "$IRONIC_GRUB2_SHIM_FILE" =~ "http".* ]]; then
+ sudo wget "$IRONIC_GRUB2_SHIM_FILE" -O $efiboot_mount/efi/boot/bootx64.efi
+ else
+ sudo cp "$IRONIC_GRUB2_SHIM_FILE" $efiboot_mount/efi/boot/bootx64.efi
+ fi
+ if [[ "$IRONIC_GRUB2_FILE" =~ "http".* ]]; then
+ sudo wget "$IRONIC_GRUB2_FILE" -O $efiboot_mount/efi/boot/grubx64.efi
+ else
+ sudo cp "$IRONIC_GRUB2_FILE" $efiboot_mount/efi/boot/grubx64.efi
+ fi
sudo umount $efiboot_mount
@@ -2981,7 +2992,7 @@ function upload_baremetal_ironic_efiboot {
# NOTE(dtantsur): this is likely incorrect
efi_grub_path=EFI/BOOT/grub.cfg
fi
- iniset $IRONIC_CONF_FILE DEFAULT grub_config_path $efi_grub_path
+ iniset $IRONIC_CONF_FILE DEFAULT grub_config_path ${IRONIC_GRUB2_CONFIG_PATH:-$efi_grub_path}
}
# build deploy kernel+ramdisk, then upload them to glance
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index f49c63d38..306569f51 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -15,7 +15,7 @@ if is_service_enabled ir-api ir-cond; then
echo_summary "Installing Ironic"
if ! is_service_enabled nova; then
- source $RC_DIR/lib/nova_plugins/functions-libvirt
+ source $TOP_DIR/lib/nova_plugins/functions-libvirt
install_libvirt
fi
install_ironic
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index 7801ccd26..a3d51696a 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -40,7 +40,7 @@ set -o errexit
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
source $TARGET_DEVSTACK_DIR/lib/nova
-source $TARGET_DEVSTACK_DIR/lib/neutron-legacy
+source $TARGET_DEVSTACK_DIR/lib/neutron
source $TARGET_DEVSTACK_DIR/lib/apache
source $TARGET_DEVSTACK_DIR/lib/keystone
diff --git a/doc/source/admin/anaconda-deploy-interface.rst b/doc/source/admin/anaconda-deploy-interface.rst
index f48926668..2b7195525 100644
--- a/doc/source/admin/anaconda-deploy-interface.rst
+++ b/doc/source/admin/anaconda-deploy-interface.rst
@@ -271,16 +271,44 @@ purposes.
``liveimg`` which is used as the base operating system image to
start with.
+Configuration Considerations
+----------------------------
+
+When using the ``anaconda`` deployment interface, some configuration
+parameters may need to be adjusted in your environment. This is in large
+part due to the general defaults being tuned to the much shorter times
+of image based deployments; given the way the anaconda deployment
+interface works, some timeouts may need to be increased.
+
+* ``[conductor]deploy_callback_timeout`` likely needs to be adjusted
+ for most ``anaconda`` deployment interface users. By default this
+ is a timer which looks for "agents" which have not checked in with
+ Ironic, or agents which may have crashed or failed after they
+ started. If the value is reached, then the current operation is failed.
+ This value should be set to a number of seconds which exceeds your
+ average anaconda deployment time.
+* ``[pxe]boot_retry_timeout`` can also be triggered and result in
+ an anaconda deployment in progress getting reset as it is intended
+ to reboot nodes which might have failed their initial PXE operation.
+ Depending on sizes of images, and the exact nature of what was deployed,
+ it may be necessary to ensure this is a much higher value.
+
Limitations
-----------
-This deploy interface has only been tested with Red Hat based operating systems
-that use anaconda. Other systems are not supported.
+* This deploy interface has only been tested with Red Hat based operating
+ systems that use anaconda. Other systems are not supported.
+
+* Runtime TLS certificate injection into ramdisks is not supported. Assets
+ such as ``ramdisk`` or a ``stage2`` ramdisk image need to have trusted
+ Certificate Authority certificates present within the images *or* the
+ Ironic API endpoint utilized should utilize a known trusted Certificate
+ Authority.
-Runtime TLS certifiate injection into ramdisks is not supported. Assets such
-as ``ramdisk`` or a ``stage2`` ramdisk image need to have trusted Certificate
-Authority certificates present within the images *or* the Ironic API endpoint
-utilized should utilize a known trusted Certificate Authority.
+* The ``anaconda`` tooling deploying the instance/workload does not
+ heartbeat to Ironic like the ``ironic-python-agent`` driven ramdisks.
+ As such, you may need to adjust some timers. See
+ `Configuration Considerations`_ for some details on this.
.. _`anaconda`: https://fedoraproject.org/wiki/Anaconda
.. _`ks.cfg.template`: https://opendev.org/openstack/ironic/src/branch/master/ironic/drivers/modules/ks.cfg.template
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index 2791430fd..1ec127831 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -1010,3 +1010,41 @@ upper limit to the setting.
This was an infrastructure operator requested feature from actual lessons
learned in the operation of Ironic in large scale production. The defaults
may not be suitable for the largest scale operators.
+
+Why do I have an error that an NVMe Partition is not a block device?
+====================================================================
+
+In some cases, you can encounter an error that suggests a partition that has
+been created on an NVMe block device, is not a block device.
+
+Example::
+
+ lsblk: /dev/nvme0n1p2: not a block device
+
+What has happened is the partition contains a partition table inside of it
+which is confusing the NVMe device interaction. While basically valid in
+some cases to have nested partition tables, for example, with software
+raid, in the NVMe case the driver and possibly the underlying device gets
+quite confused. This is in part because partitions in NVMe devices are higher
+level abstracts.
+
+The way this occurs is you likely had a ``whole-disk`` image, and it was
+configured as a partition image. If using glance, your image properties
+may have a ``img_type`` field, which should be ``whole-disk``, or you
+have a ``kernel_id`` and ``ramdisk_id`` value in the glance image
+``properties`` field. Definition of a kernel and ramdisk value also
+indicates that the image is of a ``partition`` image type. This is because
+a ``whole-disk`` image is bootable from the contents within the image,
+and partition images are unable to be booted without a kernel, and ramdisk.
+
+If you are using Ironic in standalone mode, the optional
+``instance_info/image_type`` setting may be advisable to be checked.
+Very similar to Glance usage above, if you have set Ironic's node level
+``instance_info/kernel`` and ``instance_info/ramdisk`` parameters, Ironic
+will proceed with deploying an image as if it is a partition image, and
+create a partition table on the new block device, and then write the
+contents of the image into the newly created partition.
+
+.. NOTE::
+ As a general reminder, the Ironic community recommends the use of
+ whole disk images over the use of partition images.
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index adc21ebc2..d70c13f27 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -123,9 +123,9 @@ def convert_with_links(rpc_port, fields=None, sanitize=True):
'local_link_connection',
'physical_network',
'pxe_enabled',
+ 'node_uuid',
)
)
- api_utils.populate_node_uuid(rpc_port, port)
if rpc_port.portgroup_id:
pg = objects.Portgroup.get(api.request.context, rpc_port.portgroup_id)
port['portgroup_uuid'] = pg.uuid
@@ -166,12 +166,10 @@ def list_convert_with_links(rpc_ports, limit, url, fields=None, **kwargs):
try:
port = convert_with_links(rpc_port, fields=fields,
sanitize=False)
- except exception.NodeNotFound:
# NOTE(dtantsur): node was deleted after we fetched the port
# list, meaning that the port was also deleted. Skip it.
- LOG.debug('Skipping port %s as its node was deleted',
- rpc_port.uuid)
- continue
+ if port['node_uuid'] is None:
+ continue
except exception.PortgroupNotFound:
# NOTE(dtantsur): port group was deleted after we fetched the
# port list, it may mean that the port was deleted too, but
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index 555ff164d..76c40fc2f 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -523,7 +523,7 @@ RELEASE_MAPPING = {
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
- 'Port': ['1.10'],
+ 'Port': ['1.11'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
diff --git a/ironic/common/swift.py b/ironic/common/swift.py
index 8a98c32d2..dde94fb18 100644
--- a/ironic/common/swift.py
+++ b/ironic/common/swift.py
@@ -111,6 +111,31 @@ class SwiftAPI(object):
return obj_uuid
+ def create_object_from_data(self, object, data, container):
+ """Uploads a given string to Swift.
+
+ :param object: The name of the object in Swift
+ :param data: string data to put in the object
+        :param container: The name of the container for the object.
+ :returns: The Swift UUID of the object
+ :raises: utils.Error, if any operation with Swift fails.
+ """
+ try:
+ self.connection.put_container(container)
+ except swift_exceptions.ClientException as e:
+ operation = _("put container")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ try:
+ obj_uuid = self.connection.create_object(
+ container, object, data=data)
+ except swift_exceptions.ClientException as e:
+ operation = _("put object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ return obj_uuid
+
def get_temp_url(self, container, obj, timeout):
"""Returns the temp url for the given Swift object.
diff --git a/ironic/conf/inspector.py b/ironic/conf/inspector.py
index a7f89c994..bee6a3e67 100644
--- a/ironic/conf/inspector.py
+++ b/ironic/conf/inspector.py
@@ -39,6 +39,17 @@ opts = [
'managed by ironic. Set this to True if your '
'installation of ironic-inspector does not have a '
'separate PXE boot environment.')),
+ cfg.StrOpt('inventory_data_backend',
+ help=_('The storage backend for storing introspection data.'),
+ choices=[('none', _('introspection data will not be stored')),
+ ('database', _('introspection data stored in an SQL '
+ 'database')),
+ ('swift', _('introspection data stored in Swift'))],
+ default='database'),
+ cfg.StrOpt('swift_inventory_data_container',
+ default='introspection_data_container',
+ help=_('The Swift introspection data container to store '
+ 'the inventory data.')),
]
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index a0cefea35..7a6e6862e 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -980,9 +980,7 @@ class Connection(api.Connection):
sort_key=None, sort_dir=None, owner=None,
project=None):
query = sa.select(models.Port).where(
- models.Port.portgroup_id == portgroup_id
- )
-
+ models.Port.portgroup_id == portgroup_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index 34de1b364..6fc8e21ab 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -25,6 +25,7 @@ from urllib import parse as urlparse
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import types as db_types
+from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import Boolean, Column, DateTime, false, Index
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
@@ -262,6 +263,15 @@ class Port(Base):
is_smartnic = Column(Boolean, nullable=True, default=False)
name = Column(String(255), nullable=True)
+ _node_uuid = orm.relationship(
+ "Node",
+ viewonly=True,
+ primaryjoin="(Node.id == Port.node_id)",
+ lazy="selectin",
+ )
+ node_uuid = association_proxy(
+ "_node_uuid", "uuid", creator=lambda _i: Node(uuid=_i))
+
class Portgroup(Base):
"""Represents a group of network ports of a bare metal node."""
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py
index 45f3a87f5..20911cbaa 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector.py
@@ -28,6 +28,7 @@ from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.common import states
+from ironic.common import swift
from ironic.common import utils
from ironic.conductor import periodics
from ironic.conductor import task_manager
@@ -43,6 +44,7 @@ LOG = logging.getLogger(__name__)
_INSPECTOR_SESSION = None
# Internal field to mark whether ironic or inspector manages boot for the node
_IRONIC_MANAGES_BOOT = 'inspector_manage_boot'
+_OBJECT_NAME_PREFIX = 'inspector_data'
def _get_inspector_session(**kwargs):
@@ -365,14 +367,31 @@ def _check_status(task):
_inspection_error_handler(task, error)
elif status.is_finished:
_clean_up(task)
+ # If store_data == 'none', do not store the data
+ store_data = CONF.inspector.inventory_data_backend
+ if store_data == 'none':
+ LOG.debug('Introspection data storage is disabled, the data will '
+ 'not be saved for node %(node)s', {'node': node.uuid})
+ return
introspection_data = inspector_client.get_introspection_data(
node.uuid, processed=True)
inventory_data = introspection_data.pop("inventory")
plugin_data = introspection_data
- node_inventory.NodeInventory(
- node_id=node.id,
- inventory_data=inventory_data,
- plugin_data=plugin_data).create()
+ if store_data == 'database':
+ node_inventory.NodeInventory(
+ node_id=node.id,
+ inventory_data=inventory_data,
+ plugin_data=plugin_data).create()
+ LOG.info('Introspection data was stored in database for node '
+ '%(node)s', {'node': node.uuid})
+ if store_data == 'swift':
+ swift_object_name = store_introspection_data(
+ node_uuid=node.uuid,
+ inventory_data=inventory_data,
+ plugin_data=plugin_data)
+ LOG.info('Introspection data was stored for node %(node)s in Swift'
+ ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
+ {'node': node.uuid, 'obj_name': swift_object_name})
def _clean_up(task):
@@ -387,3 +406,22 @@ def _clean_up(task):
LOG.info('Inspection finished successfully for node %s',
task.node.uuid)
task.process_event('done')
+
+
+def store_introspection_data(node_uuid, inventory_data, plugin_data):
+ """Uploads introspection data to Swift.
+
+ :param data: data to store in Swift
+ :param node_id: ID of the Ironic node that the data came from
+ :returns: name of the Swift object that the data is stored in
+ """
+ swift_api = swift.SwiftAPI()
+ swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
+ container = CONF.inspector.swift_inventory_data_container
+ swift_api.create_object_from_data(swift_object_name + '-inventory',
+ inventory_data,
+ container)
+ swift_api.create_object_from_data(swift_object_name + '-plugin',
+ plugin_data,
+ container)
+ return swift_object_name
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index 809ec59c6..154cd53d3 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -1120,7 +1120,9 @@ class RedfishRAID(base.RAIDInterface):
raid_configs['pending'].setdefault(controller, []).append(
logical_disk)
- node.set_driver_internal_info('raid_configs', raid_configs)
+ # Store only when async operation
+ if reboot_required:
+ node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required
@@ -1182,7 +1184,9 @@ class RedfishRAID(base.RAIDInterface):
response.task_monitor_uri)
reboot_required = True
- node.set_driver_internal_info('raid_configs', raid_configs)
+ # Store only when async operation
+ if reboot_required:
+ node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required
diff --git a/ironic/objects/port.py b/ironic/objects/port.py
index b4d0e78eb..d45bee4b5 100644
--- a/ironic/objects/port.py
+++ b/ironic/objects/port.py
@@ -44,7 +44,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# change)
# Version 1.9: Add support for Smart NIC port
# Version 1.10: Add name field
- VERSION = '1.10'
+ # Version 1.11: Add node_uuid field
+ VERSION = '1.11'
dbapi = dbapi.get_instance()
@@ -52,6 +53,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
'id': object_fields.IntegerField(),
'uuid': object_fields.UUIDField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
+ 'node_uuid': object_fields.UUIDField(nullable=True),
'address': object_fields.MACAddressField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
'local_link_connection': object_fields.FlexibleDictField(
@@ -377,6 +379,10 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
"""
values = self.do_version_changes_for_db()
db_port = self.dbapi.create_port(values)
+ # NOTE(hjensas): To avoid lazy load issue (DetachedInstanceError) in
+        # sqlalchemy, re-fetch the port from the DB to ensure the node_uuid
+ # via association_proxy relationship is loaded.
+ db_port = self.dbapi.get_port_by_id(db_port['id'])
self._from_db_object(self._context, self, db_port)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 6823c3b51..7092a4012 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -238,35 +238,6 @@ class TestListPorts(test_api_base.BaseApiTest):
# NOTE(jlvillal): autospec=True doesn't work on staticmethods:
# https://bugs.python.org/issue23078
- @mock.patch.object(objects.Node, 'get_by_id', spec_set=types.FunctionType)
- def test_list_with_deleted_node(self, mock_get_node):
- # check that we don't end up with HTTP 400 when node deletion races
- # with listing ports - see https://launchpad.net/bugs/1748893
- obj_utils.create_test_port(self.context, node_id=self.node.id)
- mock_get_node.side_effect = exception.NodeNotFound('boom')
- data = self.get_json('/ports')
- self.assertEqual([], data['ports'])
-
- # NOTE(jlvillal): autospec=True doesn't work on staticmethods:
- # https://bugs.python.org/issue23078
- @mock.patch.object(objects.Node, 'get_by_id',
- spec_set=types.FunctionType)
- def test_list_detailed_with_deleted_node(self, mock_get_node):
- # check that we don't end up with HTTP 400 when node deletion races
- # with listing ports - see https://launchpad.net/bugs/1748893
- port = obj_utils.create_test_port(self.context, node_id=self.node.id)
- port2 = obj_utils.create_test_port(self.context, node_id=self.node.id,
- uuid=uuidutils.generate_uuid(),
- address='66:44:55:33:11:22')
- mock_get_node.side_effect = [exception.NodeNotFound('boom'), self.node]
- data = self.get_json('/ports/detail')
- # The "correct" port is still returned
- self.assertEqual(1, len(data['ports']))
- self.assertIn(data['ports'][0]['uuid'], {port.uuid, port2.uuid})
- self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid'])
-
- # NOTE(jlvillal): autospec=True doesn't work on staticmethods:
- # https://bugs.python.org/issue23078
@mock.patch.object(objects.Portgroup, 'get', spec_set=types.FunctionType)
def test_list_with_deleted_port_group(self, mock_get_pg):
# check that we don't end up with HTTP 400 when port group deletion
diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py
index c59ee1cc4..87ba4b514 100644
--- a/ironic/tests/unit/db/utils.py
+++ b/ironic/tests/unit/db/utils.py
@@ -272,6 +272,8 @@ def get_test_port(**kw):
'version': kw.get('version', port.Port.VERSION),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
+ 'node_uuid': kw.get('node_uuid',
+ '59d102f7-5840-4299-8ec8-80c0ebae9de1'),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
index dfb3c1473..843be735c 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -336,6 +336,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_configs'))
self.assertEqual(
[{'controller': 'RAID controller 1',
'id': '1',
@@ -1066,6 +1068,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_configs'))
self.assertEqual([], task.node.raid_config['logical_disks'])
self.assertNotEqual(
last_updated, task.node.raid_config['last_updated'])
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py
index 5eb702e41..00de10189 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/test_inspector.py
@@ -19,6 +19,7 @@ import openstack
from ironic.common import context
from ironic.common import exception
from ironic.common import states
+from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import inspect_utils
@@ -553,7 +554,9 @@ class CheckStatusTestCase(BaseTestCase):
self.task)
self.driver.boot.clean_up_ramdisk.assert_called_once_with(self.task)
- def test_status_ok_store_inventory(self, mock_client):
+ def test_status_ok_store_inventory_in_db(self, mock_client):
+ CONF.set_override('inventory_data_backend', 'database',
+ group='inspector')
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error=None,
@@ -571,7 +574,47 @@ class CheckStatusTestCase(BaseTestCase):
self.assertEqual({"disks": [{"name": "/dev/vda"}]},
stored["plugin_data"])
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_status_ok_store_inventory_in_swift(self,
+ swift_api_mock, mock_client):
+ CONF.set_override('inventory_data_backend', 'swift', group='inspector')
+ CONF.set_override(
+ 'swift_inventory_data_container', 'introspection_data',
+ group='inspector')
+ mock_get = mock_client.return_value.get_introspection
+ mock_get.return_value = mock.Mock(is_finished=True,
+ error=None,
+ spec=['is_finished', 'error'])
+ mock_get_data = mock_client.return_value.get_introspection_data
+ fake_inventory_data = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+ mock_get_data.return_value = {
+ "inventory": fake_inventory_data, **fake_plugin_data}
+ swift_obj_mock = swift_api_mock.return_value
+ object_name = 'inspector_data-' + str(self.node.uuid)
+ inspector._check_status(self.task)
+ mock_get.assert_called_once_with(self.node.uuid)
+ mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
+ container = 'introspection_data'
+ swift_obj_mock.create_object_from_data.assert_has_calls([
+ mock.call(object_name + '-inventory', fake_inventory_data,
+ container),
+ mock.call(object_name + '-plugin', fake_plugin_data, container)])
+
+ def test_status_ok_store_inventory_nostore(self, mock_client):
+ CONF.set_override('inventory_data_backend', 'none', group='inspector')
+ mock_get = mock_client.return_value.get_introspection
+ mock_get.return_value = mock.Mock(is_finished=True,
+ error=None,
+ spec=['is_finished', 'error'])
+ mock_get_data = mock_client.return_value.get_introspection_data
+ inspector._check_status(self.task)
+ mock_get.assert_called_once_with(self.node.uuid)
+ mock_get_data.assert_not_called()
+
def test_status_error_dont_store_inventory(self, mock_client):
+ CONF.set_override('inventory_data_backend', 'database',
+ group='inspector')
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error='boom',
@@ -593,4 +636,5 @@ class InspectHardwareAbortTestCase(BaseTestCase):
mock_abort = mock_client.return_value.abort_introspection
mock_abort.side_effect = RuntimeError('boom')
self.assertRaises(RuntimeError, self.iface.abort, self.task)
+
mock_abort.assert_called_once_with(self.node.uuid)
diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py
index 2e45ee043..017a0d210 100644
--- a/ironic/tests/unit/objects/test_objects.py
+++ b/ironic/tests/unit/objects/test_objects.py
@@ -679,7 +679,7 @@ expected_object_fingerprints = {
'Node': '1.36-8a080e31ba89ca5f09e859bd259b54dc',
'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6',
'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905',
- 'Port': '1.10-67381b065c597c8d3a13c5dbc6243c33',
+ 'Port': '1.11-97bf15b61224f26c65e90f007d78bfd2',
'Portgroup': '1.4-71923a81a86743b313b190f5c675e258',
'Conductor': '1.3-d3f53e853b4d58cae5bfbd9a8341af4a',
'EventType': '1.1-aa2ba1afd38553e3880c267404e8d370',
diff --git a/ironic/tests/unit/objects/test_port.py b/ironic/tests/unit/objects/test_port.py
index cf7633808..4c7280216 100644
--- a/ironic/tests/unit/objects/test_port.py
+++ b/ironic/tests/unit/objects/test_port.py
@@ -88,12 +88,16 @@ class TestPortObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
port = objects.Port(self.context, **self.fake_port)
with mock.patch.object(self.dbapi, 'create_port',
autospec=True) as mock_create_port:
- mock_create_port.return_value = db_utils.get_test_port()
+ with mock.patch.object(self.dbapi, 'get_port_by_id',
+ autospec=True) as mock_get_port:
+ test_port = db_utils.get_test_port()
+ mock_create_port.return_value = test_port
+ mock_get_port.return_value = test_port
- port.create()
+ port.create()
- args, _kwargs = mock_create_port.call_args
- self.assertEqual(objects.Port.VERSION, args[0]['version'])
+ args, _kwargs = mock_create_port.call_args
+ self.assertEqual(objects.Port.VERSION, args[0]['version'])
def test_save(self):
uuid = self.fake_port['uuid']
diff --git a/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml b/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml
new file mode 100644
index 000000000..ec9043adb
--- /dev/null
+++ b/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes ``'NoneType' object is not iterable`` in conductor logs for
+ ``redfish`` and ``idrac-redfish`` RAID clean and deploy steps. The message
+ should no longer appear. For affected nodes re-create the node or delete
+ ``raid_configs`` entry from ``driver_internal_info`` field.
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index db897763b..c59c54e86 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -299,6 +299,9 @@
# result and makes this job VERY sensitive to heavy disk IO of the
# underlying hypervisor/cloud.
IRONIC_CALLBACK_TIMEOUT: 800
+ IRONIC_GRUB2_SHIM_FILE: https://mirror.iad3.inmotion.opendev.org/centos-stream/9-stream/BaseOS/x86_64/os/EFI/BOOT/BOOTX64.EFI
+ IRONIC_GRUB2_FILE: https://mirror.iad3.inmotion.opendev.org/centos-stream/9-stream/BaseOS/x86_64/os/EFI/BOOT/grubx64.efi
+ IRONIC_GRUB2_CONFIG_PATH: EFI/BOOT/grub.cfg
devstack_services:
s-account: True
s-container: True