-rw-r--r--  bindep.txt | 6
-rw-r--r--  devstack/files/bindep.txt | 4
-rw-r--r--  devstack/lib/ironic | 53
-rwxr-xr-x  devstack/tools/ironic/scripts/cirros-partition.sh | 69
-rw-r--r--  doc/source/admin/adoption.rst | 5
-rw-r--r--  doc/source/admin/cleaning.rst | 54
-rw-r--r--  doc/source/admin/drivers/idrac.rst | 18
-rw-r--r--  doc/source/admin/drivers/ilo.rst | 5
-rw-r--r--  doc/source/admin/drivers/irmc.rst | 4
-rw-r--r--  doc/source/admin/drivers/redfish.rst | 51
-rw-r--r--  doc/source/admin/drivers/snmp.rst | 29
-rw-r--r--  doc/source/admin/report.txt | 2
-rw-r--r--  doc/source/admin/secure-rbac.rst | 34
-rw-r--r--  doc/source/admin/troubleshooting.rst | 125
-rw-r--r--  doc/source/contributor/jobs-description.rst | 12
-rw-r--r--  doc/source/install/configure-glance-images.rst | 40
-rw-r--r--  doc/source/install/install.rst | 29
-rw-r--r--  doc/source/user/creating-images.rst | 4
-rw-r--r--  doc/source/user/deploy.rst | 22
-rw-r--r--  driver-requirements.txt | 4
-rw-r--r--  ironic/api/controllers/v1/allocation.py | 4
-rw-r--r--  ironic/api/controllers/v1/chassis.py | 8
-rw-r--r--  ironic/api/controllers/v1/collection.py | 8
-rw-r--r--  ironic/api/controllers/v1/conductor.py | 2
-rw-r--r--  ironic/api/controllers/v1/deploy_template.py | 1
-rw-r--r--  ironic/api/controllers/v1/node.py | 24
-rw-r--r--  ironic/api/controllers/v1/port.py | 9
-rw-r--r--  ironic/api/controllers/v1/portgroup.py | 9
-rw-r--r--  ironic/api/controllers/v1/volume_connector.py | 2
-rw-r--r--  ironic/api/controllers/v1/volume_target.py | 2
-rw-r--r--  ironic/cmd/api.py | 2
-rw-r--r--  ironic/cmd/conductor.py | 8
-rw-r--r--  ironic/cmd/singleprocess.py | 7
-rw-r--r--  ironic/common/glance_service/image_service.py | 9
-rw-r--r--  ironic/common/images.py | 28
-rw-r--r--  ironic/common/kickstart_utils.py | 14
-rw-r--r--  ironic/common/policy.py | 2
-rw-r--r--  ironic/common/pxe_utils.py | 118
-rw-r--r--  ironic/common/release_mappings.py | 42
-rw-r--r--  ironic/common/rpc_service.py | 22
-rw-r--r--  ironic/common/wsgi_service.py | 21
-rw-r--r--  ironic/conductor/cleaning.py | 5
-rw-r--r--  ironic/conductor/deployments.py | 17
-rw-r--r--  ironic/conductor/manager.py | 34
-rw-r--r--  ironic/conductor/utils.py | 38
-rw-r--r--  ironic/conf/api.py | 16
-rw-r--r--  ironic/conf/deploy.py | 5
-rw-r--r--  ironic/conf/glance.py | 5
-rw-r--r--  ironic/conf/redfish.py | 15
-rw-r--r--  ironic/drivers/modules/agent.py | 12
-rw-r--r--  ironic/drivers/modules/agent_base.py | 12
-rw-r--r--  ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml | 4
-rw-r--r--  ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml | 2
-rw-r--r--  ironic/drivers/modules/deploy_utils.py | 57
-rw-r--r--  ironic/drivers/modules/drac/bios.py | 6
-rw-r--r--  ironic/drivers/modules/drac/management.py | 21
-rw-r--r--  ironic/drivers/modules/drac/raid.py | 16
-rw-r--r--  ironic/drivers/modules/ilo/boot.py | 18
-rw-r--r--  ironic/drivers/modules/inspector.py | 12
-rw-r--r--  ironic/drivers/modules/irmc/boot.py | 8
-rw-r--r--  ironic/drivers/modules/ks.cfg.template | 15
-rw-r--r--  ironic/drivers/modules/pxe.py | 22
-rw-r--r--  ironic/drivers/modules/redfish/boot.py | 9
-rw-r--r--  ironic/drivers/modules/redfish/firmware_utils.py | 201
-rw-r--r--  ironic/drivers/modules/redfish/management.py | 72
-rw-r--r--  ironic/drivers/modules/redfish/raid.py | 351
-rw-r--r--  ironic/drivers/modules/snmp.py | 5
-rw-r--r--  ironic/drivers/utils.py | 21
-rw-r--r--  ironic/objects/node.py | 12
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_allocation.py | 14
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_conductor.py | 12
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_deploy_template.py | 13
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_node.py | 10
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_port.py | 5
-rw-r--r--  ironic/tests/unit/common/test_glance_service.py | 56
-rw-r--r--  ironic/tests/unit/common/test_images.py | 42
-rw-r--r--  ironic/tests/unit/common/test_kickstart_utils.py | 2
-rw-r--r--  ironic/tests/unit/common/test_pxe_utils.py | 46
-rw-r--r--  ironic/tests/unit/common/test_rpc_service.py | 31
-rw-r--r--  ironic/tests/unit/conductor/test_deployments.py | 5
-rw-r--r--  ironic/tests/unit/conductor/test_manager.py | 32
-rw-r--r--  ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template | 47
-rw-r--r--  ironic/tests/unit/drivers/modules/drac/test_bios.py | 4
-rw-r--r--  ironic/tests/unit/drivers/modules/drac/test_raid.py | 4
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py | 375
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_management.py | 219
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_raid.py | 344
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent.py | 9
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent_base.py | 13
-rw-r--r--  ironic/tests/unit/drivers/modules/test_deploy_utils.py | 151
-rw-r--r--  ironic/tests/unit/drivers/modules/test_inspector.py | 12
-rw-r--r--  ironic/tests/unit/drivers/modules/test_ipxe.py | 60
-rw-r--r--  ironic/tests/unit/drivers/modules/test_pxe.py | 10
-rw-r--r--  releasenotes/notes/add-more-sources-redfish-firmware-update-3da89f10dc0f8d21.yaml | 14
-rw-r--r--  releasenotes/notes/anaconda-config-drive-fixes-5880884e34584549.yaml | 19
-rw-r--r--  releasenotes/notes/anaconda-deploy-more-fixes-58d996c7031c8c4b.yaml | 33
-rw-r--r--  releasenotes/notes/anaconda-instance-info-fix-a51837d8ac7b41de.yaml | 9
-rw-r--r--  releasenotes/notes/api-none-3fdca1ccbb64d9b0.yaml | 12
-rw-r--r--  releasenotes/notes/fix-redfish-raid-deploy-steps-e9ee1ea3d1f2a475.yaml | 2
-rw-r--r--  releasenotes/notes/fix-redfish-raid-failed-tasks-02487c4698dea176.yaml | 6
-rw-r--r--  releasenotes/notes/fix-redfish-raid-onreset-workflow-bfa44de6b0263a1f.yaml | 9
-rw-r--r--  releasenotes/notes/idrac-redfish-clean-steps-not-require-ramdisk-699e169af39b0dd6.yaml | 5
-rw-r--r--  releasenotes/notes/idrac-wsman-clean-steps-not-require-ramdisk-ca98aa5c0a88f727.yaml | 5
-rw-r--r--  releasenotes/notes/image-type-ac259a90393bdd2c.yaml | 11
-rw-r--r--  releasenotes/notes/initrd_filename-ac68e96f1c9fb576.yaml | 6
-rw-r--r--  releasenotes/notes/known-issue-idrac-firmware-swift-721a19cac796e1ae.yaml | 8
-rw-r--r--  releasenotes/notes/netboot-deprecation-fe5751a47df2d0b7.yaml | 14
-rw-r--r--  releasenotes/notes/service-exit-77bcf3a538fab4bc.yaml | 5
-rw-r--r--  releasenotes/notes/service-wait-e85cbe7978f61764.yaml | 5
-rw-r--r--  releasenotes/notes/swift_account_prefix-dbc9e68890bff47c.yaml | 6
-rw-r--r--  releasenotes/notes/unix-socket-48e8f1caf4cb19f9.yaml | 5
-rw-r--r--  releasenotes/notes/update-idrac-redfish-management-export-conf-9fb17273c4d9a050.yaml | 10
-rw-r--r--  tools/benchmark/generate-statistics.py | 2
-rw-r--r--  tox.ini | 2
-rw-r--r--  zuul.d/ironic-jobs.yaml | 19
-rw-r--r--  zuul.d/project.yaml | 8
116 files changed, 3012 insertions, 642 deletions
diff --git a/bindep.txt b/bindep.txt
index 863787763..16adeecda 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -86,9 +86,13 @@ apparmor [platform:dpkg imagebuild]
gnupg [imagebuild]
squashfs-tools [platform:dpkg platform:redhat imagebuild]
squashfs [platform:suse imagebuild]
+# For custom partition images
+kpartx [devstack]
libguestfs0 [platform:dpkg imagebuild]
-libguestfs [platform:rpm imagebuild]
+libguestfs [platform:rpm imagebuild devstack]
+libguestfs-tools [platform:dpkg devstack]
python-guestfs [platform:dpkg imagebuild]
+qemu-img [platform:rpm devstack]
# for TinyIPA build
wget [imagebuild]
python-pip [imagebuild]
diff --git a/devstack/files/bindep.txt b/devstack/files/bindep.txt
index 8c386349a..820f9b8b0 100644
--- a/devstack/files/bindep.txt
+++ b/devstack/files/bindep.txt
@@ -87,9 +87,13 @@ apparmor [platform:dpkg imagebuild]
gnupg [imagebuild]
squashfs-tools [platform:dpkg platform:redhat imagebuild]
squashfs [platform:suse imagebuild]
+# For custom partition images
+kpartx
libguestfs0 [platform:dpkg imagebuild]
libguestfs [platform:rpm imagebuild]
+libguestfs-tools [platform:dpkg]
python-guestfs [platform:dpkg imagebuild]
+qemu-img [platform:rpm]
# for TinyIPA build
wget [imagebuild]
python-pip [imagebuild]
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index a044073db..366eb03b8 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -344,7 +344,7 @@ if [[ -z "$IRONIC_DIB_RAMDISK_OPTIONS" ]]; then
if [[ "$IRONIC_DIB_RAMDISK_OS" == "centos8" ]]; then
# Adapt for DIB naming change
IRONIC_DIB_RAMDISK_OS=centos
- IRONIC_DIB_RAMDISK_RELEASE=8
+ IRONIC_DIB_RAMDISK_RELEASE=8-stream
fi
IRONIC_DIB_RAMDISK_OPTIONS="$IRONIC_DIB_RAMDISK_OS"
fi
@@ -2852,6 +2852,46 @@ function build_ipa_dib_ramdisk {
rm -rf $tempdir
}
+function upload_image_if_needed {
+ if [[ "$IRONIC_PARTITIONED_IMAGE_NAME" =~ cirros ]] && is_service_enabled glance; then
+ echo Building a Cirros image suitable for local boot
+
+ local dest
+ IRONIC_PARTITIONED_IMAGE_NAME=cirros-${CIRROS_VERSION}-x86_64-partition
+ dest="$IRONIC_DATA_DIR/$IRONIC_PARTITIONED_IMAGE_NAME.img"
+
+ # Export some variables that the script is using.
+ CIRROS_ARCH=$CIRROS_ARCH CIRROS_VERSION=$CIRROS_VERSION \
+ IRONIC_TTY_DEV=$IRONIC_TTY_DEV VERBOSE=$VERBOSE \
+ $IRONIC_SCRIPTS_DIR/cirros-partition.sh "$dest"
+
+ # TODO(dtantsur): stop uploading kernel/ramdisk when image_type support
+ # lands.
+ local kernel_id
+ kernel_id=$(openstack image list -f value -c ID -c Name \
+ | awk '/cirros.*kernel/ { print $1; exit 0; }')
+ die_if_not_set $LINENO kernel_id "Cannot find cirros kernel"
+
+ local ramdisk_id
+ ramdisk_id=$(openstack image list -f value -c ID -c Name \
+ | awk '/cirros.*ramdisk/ { print $1; exit 0; }')
+ die_if_not_set $LINENO ramdisk_id "Cannot find cirros ramdisk"
+
+ openstack image create $IRONIC_PARTITIONED_IMAGE_NAME \
+ --public --disk-format raw --container-format bare \
+ --property kernel_id=$kernel_id --property ramdisk_id=$ramdisk_id \
+ --file "$dest"
+
+ # Change the default image only if the provided settings prevent the
+ # default cirros image from working.
+ if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" != True \
+ && "$IRONIC_DEFAULT_BOOT_OPTION" == local ]]; then
+ IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME
+ DEFAULT_IMAGE_NAME=$IRONIC_IMAGE_NAME
+ fi
+ fi
+}
+
# download EFI boot loader image and upload it to glance
# this function sets ``IRONIC_EFIBOOT_ID``
function upload_baremetal_ironic_efiboot {
@@ -3030,6 +3070,8 @@ function prepare_baremetal_basic_ops {
upload_baremetal_ironic_efiboot
fi
+ upload_image_if_needed
+
configure_tftpd
configure_iptables
}
@@ -3152,6 +3194,13 @@ function ironic_configure_tempest {
iniset $TEMPEST_CONFIG baremetal partition_image_ref $image_uuid
fi
+ # Our cirros images cannot do local boot in legacy mode.
+ if [[ "${IRONIC_PARTITIONED_IMAGE_NAME}" =~ cirros && "${IRONIC_BOOT_MODE}" == "bios" ]]; then
+ iniset $TEMPEST_CONFIG baremetal partition_netboot True
+ else
+ iniset $TEMPEST_CONFIG baremetal partition_netboot False
+ fi
+
if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
iniset $TEMPEST_CONFIG baremetal whole_disk_image_url "http://$IRONIC_HOST_IPV6:$IRONIC_HTTP_PORT/${IRONIC_WHOLEDISK_IMAGE_NAME}.img"
else
@@ -3174,6 +3223,8 @@ function ironic_configure_tempest {
# Driver for API tests
iniset $TEMPEST_CONFIG baremetal driver fake-hardware
+ iniset $TEMPEST_CONFIG baremetal default_boot_option $IRONIC_DEFAULT_BOOT_OPTION
+
local adjusted_root_disk_size_gb
if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
adjusted_root_disk_size_gb=$(( ${IRONIC_VM_SPECS_DISK} - ${IRONIC_VM_EPHEMERAL_DISK} ))
diff --git a/devstack/tools/ironic/scripts/cirros-partition.sh b/devstack/tools/ironic/scripts/cirros-partition.sh
new file mode 100755
index 000000000..40c87b19e
--- /dev/null
+++ b/devstack/tools/ironic/scripts/cirros-partition.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+set -eu -o pipefail
+
+VERBOSE=${VERBOSE:-True}
+if [[ "$VERBOSE" == True ]]; then
+ set -x
+ guestfish_args="--verbose"
+fi
+
+CIRROS_VERSION=${CIRROS_VERSION:-0.5.2}
+CIRROS_ARCH=${CIRROS_ARCH:-x86_64}
+# TODO(dtantsur): use the image cached on infra images in the CI
+DISK_URL=http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img
+OUT=$(realpath ${1:-rootfs.img})
+
+IRONIC_TTY_DEV=${IRONIC_TTY_DEV:-ttyS0,115200}
+# rdroot : boot from the ramdisk present on the root partition instead of
+# mounting the root partition.
+# dslist : disable Nova metadata support, it takes a long time on boot.
+KARGS=${KARGS:-nofb nomodeset vga=normal console=${IRONIC_TTY_DEV} rdroot dslist=configdrive}
+
+workdir=$(mktemp -d)
+root_mp=$workdir/root
+efi_mp=$workdir/efi
+dest=$workdir/dest
+
+cd $workdir
+
+curl -Lf -o disk.qcow2 $DISK_URL
+qemu-img convert -O raw disk.qcow2 disk.img
+rm disk.qcow2
+
+# kpartx automatically allocates loop devices for all partitions in the image
+device=$(sudo kpartx -av disk.img | grep -oE 'loop[0-9]+p' | head -1)
+
+function clean_up {
+ set +e
+ sudo umount $efi_mp
+ sudo umount $root_mp
+ sudo kpartx -d $workdir/disk.img
+ sudo rm -rf $workdir
+}
+trap clean_up EXIT
+
+# TODO(dtantsur): some logic instead of hardcoding numbers 1 and 15?
+rootdev=/dev/mapper/${device}1
+efidev=/dev/mapper/${device}15
+
+mkdir -p $root_mp $efi_mp $dest/boot/efi
+sudo mount $rootdev $root_mp
+sudo mount $efidev $efi_mp
+
+sudo cp -aR $root_mp/* $dest/
+sudo cp -aR $efi_mp/EFI $dest/boot/efi/
+
+# These locations are required by IPA even when it does not really run
+# grub-install.
+sudo mkdir -p $dest/{dev,proc,run,sys}
+
+# The default arguments don't work for us, update grub configuration.
+sudo sed -i "/^ *linux /s/\$/ $KARGS/" $dest/boot/efi/EFI/ubuntu/grub.cfg
+
+LIBGUESTFS_BACKEND=direct sudo -E \
+ virt-make-fs --size +50M --type ext3 --label cirros-rootfs \
+ ${guestfish_args:-} "$dest" "$OUT"
+
+sudo chown $USER "$OUT"
+qemu-img info "$OUT"
diff --git a/doc/source/admin/adoption.rst b/doc/source/admin/adoption.rst
index ba404fd0b..570b36072 100644
--- a/doc/source/admin/adoption.rst
+++ b/doc/source/admin/adoption.rst
@@ -51,10 +51,7 @@ The adoption process makes no changes to the physical node, with the
exception of operator supplied configurations where virtual media is
used to boot the node under normal circumstances. An operator should
ensure that any supplied configuration defining the node is sufficient
-for the continued operation of the node moving forward. Such as, if the
-node is configured to network boot via instance_info/boot_option="netboot",
-then appropriate driver specific node configuration should be set to
-support this capability.
+for the continued operation of the node moving forward.
Possible Risk
=============
diff --git a/doc/source/admin/cleaning.rst b/doc/source/admin/cleaning.rst
index cf972622e..d800be501 100644
--- a/doc/source/admin/cleaning.rst
+++ b/doc/source/admin/cleaning.rst
@@ -73,6 +73,60 @@ cleaning steps.
See `How do I change the priority of a cleaning step?`_ for more information.
+Storage cleaning options
+------------------------
+
+Clean steps specific to storage are ``erase_devices``,
+``erase_devices_metadata`` and (added in Yoga) ``erase_devices_express``.
+
+``erase_devices`` aims to ensure that the data is removed in the most secure
+way available. On devices that support hardware-assisted secure erasure
+(many NVMe and some ATA drives) this is the preferred option. If
+hardware-assisted secure erasure is not available and
+``[deploy]/continue_if_disk_secure_erase_fails`` is set to ``True``, cleaning
+will fall back to using ``shred`` to overwrite the contents of the device.
+Otherwise cleaning will fail. It is important to note that ``erase_devices``
+may take a very long time (hours or even days) to complete, unless fast,
+hardware-assisted data erasure is supported by all the devices in a system.
+Generally, it is very difficult (if possible at all) to recover data after
+performing cleaning with ``erase_devices``.
+
+The ``erase_devices_metadata`` clean step doesn't provide as strong an
+assurance of irreversible data destruction as ``erase_devices``. However, it
+has the advantage of a reasonably quick runtime (seconds to minutes). It
+operates by destroying the metadata of the storage device without erasing
+every bit of the data itself. Attempts to restore data after running
+``erase_devices_metadata`` may be successful but would certainly require
+relevant expertise and specialized tools.
+
+Lastly, ``erase_devices_express`` combines some of the perks of both
+``erase_devices`` and ``erase_devices_metadata``. It attempts to utilize
+hardware-assisted data erasure features if available (currently only NVMe
+devices are supported). In case hardware-assisted data erasure is not
+available, it falls back to metadata erasure for the device (which is
+identical to ``erase_devices_metadata``). It can be considered a
+time-optimized mode of storage cleaning, aiming to perform as thorough a
+data erasure as possible within a short period of time. This clean step is
+particularly well suited for environments with hybrid NVMe-HDD storage
+configurations, as it allows fast and secure erasure of data stored on NVMe
+devices combined with equally fast but more basic metadata-based erasure of
+data on HDDs.
+
+``erase_devices_express`` is disabled by default. In order to use it, the
+following configuration is recommended:
+
+.. code-block:: ini
+
+ [deploy]/erase_devices_priority=0
+ [deploy]/erase_devices_metadata_priority=0
+ [conductor]/clean_step_priority_override=deploy.erase_devices_express:5
+
+This ensures that ``erase_devices`` and ``erase_devices_metadata`` are
+disabled so that storage is not cleaned twice and then assigns a non-zero
+priority to ``erase_devices_express``, hence enabling it. Any non-zero
+priority specified in the priority override will work.
+
+Also, ``[deploy]/enable_nvme_secure_erase`` should not be disabled (it is on
+by default).
+
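+One can also run ``erase_devices_express`` on demand via manual cleaning,
+without the priority override. A sketch, assuming the node is in the
+``manageable`` state (client syntax may vary by version):
+
+.. code-block:: console
+
+   $ baremetal node clean <node> --clean-steps \
+       '[{"interface": "deploy", "step": "erase_devices_express"}]'
+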
.. show-steps::
:phase: cleaning
diff --git a/doc/source/admin/drivers/idrac.rst b/doc/source/admin/drivers/idrac.rst
index 494d151a7..9a1c999ea 100644
--- a/doc/source/admin/drivers/idrac.rst
+++ b/doc/source/admin/drivers/idrac.rst
@@ -466,6 +466,15 @@ RAID Interface
See :doc:`/admin/raid` for more information on Ironic RAID support.
+The RAID interface of the ``redfish`` hardware type can be used on iDRAC
+systems. Compared to the ``redfish`` RAID interface, ``idrac-redfish`` adds:
+
+* Waiting for real-time operations to become available on RAID controllers.
+  When using ``redfish``, this is not guaranteed, and reboots might
+  intermittently be required to complete the operations,
+* Converting non-RAID disks to RAID mode if there are any,
+* Clearing foreign configuration, if any, after deleting virtual disks.
+
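+For example, to switch a node to this RAID interface (a sketch, assuming it
+is enabled in ``[DEFAULT]enabled_raid_interfaces``):
+
+.. code-block:: console
+
+   $ baremetal node set <node> --raid-interface idrac-redfish
+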
The following properties are supported by the iDRAC WSMAN and Redfish RAID
interface implementation:
@@ -925,4 +934,13 @@ selected if default plug-in type has been used and never changed. Systems that
have plug-in type changed will keep selected plug-in type after iDRAC firmware
upgrade.
+Firmware update from Swift fails
+--------------------------------
+
+When using Swift to stage firmware update files for the management interface's
+``firmware_update`` clean step of the ``redfish`` or ``idrac`` hardware type,
+cleaning fails with the error "An internal error occurred. Unable to complete
+the specified operation." in the iDRAC job. Until this is fixed, use an HTTP
+service to stage firmware files for iDRAC.
+
.. _SCP_Reference_Guide: http://downloads.dell.com/manuals/common/dellemc-server-config-profile-refguide.pdf
diff --git a/doc/source/admin/drivers/ilo.rst b/doc/source/admin/drivers/ilo.rst
index 40bb06735..4ffa8bcfb 100644
--- a/doc/source/admin/drivers/ilo.rst
+++ b/doc/source/admin/drivers/ilo.rst
@@ -1084,6 +1084,11 @@ intermediate images on conductor as described in
Deploy Process
==============
+.. note::
+ Network boot is deprecated and will be removed in the Zed release.
+
+.. TODO(dtantsur): review these diagrams to exclude netboot.
+
Netboot with glance and swift
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/admin/drivers/irmc.rst b/doc/source/admin/drivers/irmc.rst
index af7d10506..7e53bf8bb 100644
--- a/doc/source/admin/drivers/irmc.rst
+++ b/doc/source/admin/drivers/irmc.rst
@@ -235,14 +235,14 @@ For more information on node automated cleaning, see :ref:`automated_cleaning`
Boot from Remote Volume
^^^^^^^^^^^^^^^^^^^^^^^
-The ``irmc`` hardware type supports the generic iPXE-based remote volume
+The ``irmc`` hardware type supports the generic PXE-based remote volume
booting when using the following boot interfaces:
* ``irmc-pxe``
* ``pxe``
In addition, the ``irmc`` hardware type supports remote volume booting without
-iPXE. This is available when using the ``irmc-virtual-media`` boot interface.
+PXE. This is available when using the ``irmc-virtual-media`` boot interface.
This feature configures a node to boot from a remote volume by using the API
of iRMC. It supports iSCSI and FibreChannel.
diff --git a/doc/source/admin/drivers/redfish.rst b/doc/source/admin/drivers/redfish.rst
index 0878b08bf..d2d93d9ff 100644
--- a/doc/source/admin/drivers/redfish.rst
+++ b/doc/source/admin/drivers/redfish.rst
@@ -385,6 +385,8 @@ The ``update_firmware`` cleaning step accepts JSON in the following format::
"firmware_images":[
{
"url": "<url_to_firmware_image1>",
+ "checksum": "<checksum for image, uses SHA1>",
+ "source": "<optional override source setting for image>",
"wait": <number_of_seconds_to_wait>
},
{
@@ -410,16 +412,21 @@ Each firmware image dictionary, is of the form::
{
"url": "<URL of firmware image file>",
+ "checksum": "<checksum for image, uses SHA1>",
+ "source": "<Optional override source setting for image>",
"wait": <Optional time in seconds to wait after applying update>
}
-The ``url`` argument in the firmware image dictionary is mandatory, while the
-``wait`` argument is optional.
+The ``url`` and ``checksum`` arguments in the firmware image dictionary are
+mandatory, while the ``source`` and ``wait`` arguments are optional.
+Currently, the ``http``, ``https``, ``swift`` and ``file`` schemes are
+supported for ``url``.
+
+``source`` corresponds to ``[redfish]firmware_source``; by setting it here, it
+is possible to override the global setting per firmware image in the clean
+step arguments.
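+
+For example, to stage firmware files through the conductor's HTTP server by
+default, unless overridden per image (a sketch; ``http`` is one accepted
+value, matching the ``source`` values shown below)::
+
+    [redfish]
+    firmware_source = http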
-.. note::
- Only ``http`` and ``https`` URLs are currently supported in the ``url``
- argument.
.. note::
At the present time, targets for the firmware update cannot be specified.
@@ -427,19 +434,20 @@ The ``url`` argument in the firmware image dictionary is mandatory, while the
node. It is assumed that the BMC knows what components a given firmware
image is applicable to.
-To perform a firmware update, first download the firmware to a web server that
-the BMC has network access to. This could be the ironic conductor web server
-or another web server on the BMC network. Using a web browser, curl, or similar
-tool on a server that has network access to the BMC, try downloading
-the firmware to verify that the URLs are correct and that the web server is
-configured properly.
+To perform a firmware update, first download the firmware to a web server,
+Swift or filesystem that the Ironic conductor or BMC has network access to.
+This could be the ironic conductor web server or another web server on the BMC
+network. Using a web browser, curl, or similar tool on a server that has
+network access to the BMC or Ironic conductor, try downloading the firmware to
+verify that the URLs are correct and that the web server is configured
+properly.
Next, construct the JSON for the firmware update cleaning step to be executed.
When launching the firmware update, the JSON may be specified on the command
-line directly or in a file. The following
-example shows one cleaning step that installs two firmware updates. The first
-updates the BMC firmware followed by a five minute wait to allow the BMC time
-to start back up. The second updates the firmware on all applicable NICs.::
+line directly or in a file. The following example shows one cleaning step that
+installs four firmware updates. All entries except the 3rd, which has an
+explicit ``source`` set, use the ``[redfish]firmware_source`` setting to
+determine if and where to stage the files::
[{
"interface": "management",
@@ -448,10 +456,21 @@ to start back up. The second updates the firmware on all applicable NICs.::
"firmware_images":[
{
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "<sha1-checksum-of-the-file>",
"wait": 300
},
{
- "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE"
+ "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE",
+ "checksum": "<sha1-checksum-of-the-file>"
+ },
+ {
+ "url": "file:///firmware_images/idrac/9/PERC_WN64_6.65.65.65_A00.EXE",
+ "checksum": "<sha1-checksum-of-the-file>",
+ "source": "http"
+ },
+ {
+ "url": "swift://firmware_container/BIOS_W8Y0W_WN64_2.1.7.EXE",
+ "checksum": "<sha1-checksum-of-the-file>"
}
]
}
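+
+The cleaning step can then be launched, for example, via manual cleaning (a
+sketch, assuming the JSON above was saved to ``firmware_update.json``)::
+
+    baremetal node clean <node> --clean-steps firmware_update.json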
diff --git a/doc/source/admin/drivers/snmp.rst b/doc/source/admin/drivers/snmp.rst
index 7a91bc126..1c402ab9b 100644
--- a/doc/source/admin/drivers/snmp.rst
+++ b/doc/source/admin/drivers/snmp.rst
@@ -77,30 +77,20 @@ Enabling the SNMP Hardware Type
enabled_management_interfaces = noop
enabled_power_interfaces = snmp
-#. To set the default boot option, update ``default_boot_option`` in
+#. To enable the network boot fallback, update ``enable_netboot_fallback`` in
``ironic.conf``:
.. code-block:: ini
- [DEFAULT]
- default_boot_option = netboot
-
- .. note::
- Currently the default value of ``default_boot_option`` is ``netboot``
- but it will be changed to ``local`` in the future. It is recommended
- to set an explicit value for this option.
+ [pxe]
+ enable_netboot_fallback = True
.. note::
- It is important to set ``boot_option`` to ``netboot`` as SNMP hardware
- type does not support setting of boot devices. One can also configure
- a node to boot using ``netboot`` by setting its ``capabilities`` and
- updating Nova flavor as described below:
-
- .. code-block:: console
-
- baremetal node set --property capabilities="boot_option:netboot" <node>
- openstack flavor set --property "capabilities:boot_option"="netboot" ironic-flavor
-
+      It is important to enable the fallback as the SNMP hardware type does
+      not support setting boot devices. When booting in legacy (BIOS) mode,
+ the generated network booting artifact will force booting from local
+ disk. In UEFI mode, Ironic will configure the boot order using UEFI
+ variables.
#. Restart the Ironic conductor service.
@@ -165,5 +155,4 @@ type:
--driver snmp --driver-info snmp_driver=<pdu_manufacturer> \
--driver-info snmp_address=<ip_address> \
--driver-info snmp_outlet=<outlet_index> \
- --driver-info snmp_community=<community_string> \
- --properties capabilities=boot_option:netboot
+ --driver-info snmp_community=<community_string>
diff --git a/doc/source/admin/report.txt b/doc/source/admin/report.txt
index e098e5aef..1f1fc4d8e 100644
--- a/doc/source/admin/report.txt
+++ b/doc/source/admin/report.txt
@@ -321,7 +321,7 @@ default:
deploy:
continue_if_disk_secure_erase_fails = False
- default_boot_option = netboot
+ default_boot_option = local
erase_devices_metadata_priority = None
erase_devices_priority = 0
http_root = /opt/stack/data/ironic/httpboot
diff --git a/doc/source/admin/secure-rbac.rst b/doc/source/admin/secure-rbac.rst
index 65fef8d65..d80643c66 100644
--- a/doc/source/admin/secure-rbac.rst
+++ b/doc/source/admin/secure-rbac.rst
@@ -16,7 +16,7 @@ customization of these policies to consult some reference material
in hopes of understanding the context.
* `Keystone Adminstrator Guide - Service API Protection <https://docs.openstack.org/keystone/latest/admin/service-api-protection.html>`_
-* `Ironic Scoped Role Based Access Control Specification <https://specs.openstack.org/openstack/ironic-specs/specs/not-implemented/secure-rbac.html>`_
+* `Ironic Scoped Role Based Access Control Specification <https://specs.openstack.org/openstack/ironic-specs/specs/17.0/secure-rbac.html>`_
Historical Context - How we reached our access model
----------------------------------------------------
@@ -32,7 +32,7 @@ stored in these fields.
System Scoped
=============
-System scoped authentication is intended for "administrative" activites such
+System scoped authentication is intended for "administrative" activities such
as those crossing tenants/projects, as all tenants/projects should be visible
to ``system`` scoped users in Ironic.
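+
+For example, system-scoped authentication with the OpenStack client can be set
+up via environment variables (a sketch; all credentials are placeholders):
+
+.. code-block:: console
+
+   $ export OS_AUTH_URL=https://keystone.example.com/v3
+   $ export OS_USERNAME=operator
+   $ export OS_PASSWORD=secret
+   $ export OS_USER_DOMAIN_NAME=Default
+   $ export OS_SYSTEM_SCOPE=all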
@@ -44,8 +44,8 @@ policy enforcement framework the information necessary to make decisions.
System scoped requests very much align with the access controls of Ironic
before the Secure RBAC effort. The original custom role ``baremetal_admin``
-privilges are identical to a system scoped ``admin``'s privilges.
-Similarlly ``baremetal_reader`` is identical to a system scoped ``reader``.
+privileges are identical to a system scoped ``admin``'s privileges.
+Similarly ``baremetal_observer`` is identical to a system scoped ``reader``.
In these concepts, the ``admin`` is allowed to create/delete objects/items.
The ``reader`` is allowed to read details about items and is intended for
users who may need an account with read-only access for or front-line support
@@ -100,8 +100,8 @@ Supported Endpoints
How Project Scoped Works
------------------------
-Ironic has two project use models where access is generally more delagative
-to an ``owner`` where access to a ``lessee`` is generally more utilitarian.
+Ironic has two project use models where access is generally more delegative
+to an ``owner`` and access to a ``lessee`` is generally more utilitarian.
The purpose of an owner, is more to enable the System Operator to delegate
much of the administrative activity of a Node to the owner.
@@ -113,7 +113,7 @@ applicable.
The purpose of a lessee is more for a *tenant* in their *project* to
be able to have access to perform basic actions with the API. In some cases
that may be to reprovision or rebuild a node. Ultimately that is the lessee's
-progative, but by default there are actions and field updates that cannot
+prerogative, but by default there are actions and field updates that cannot
be performed by default. This is also governed by access level within
a project.
@@ -131,13 +131,13 @@ Field value visibility restrictions
Ironic's API, by default has a concept of filtering node values to prevent
sensitive data from being leaked. System scoped users are subjected to basic
-restrictions, where as project scoped users are, by default, examined further
+restrictions, whereas project scoped users are, by default, examined further
and against additional policies. This threshold is controlled with the
``baremetal:node:get:filter_threshold``.
By default, the following fields are masked on Nodes and are controlled by the
-associated policies. By default, owner's are able to see insight into the
-infrastucture, where as lessee users *CANNOT* view these fields by default.
+associated policies. By default, owners are able to see insight into the
+infrastructure, whereas lessee users *CANNOT* view these fields by default.
* ``last_error`` - ``baremetal:node:get:last_error``
* ``reservation`` - ``baremetal:node:get:reservation``
@@ -175,10 +175,10 @@ Allocations
~~~~~~~~~~~
The ``allocations`` endpoint of the API is somewhat different than other
-other endpoints as it allows for the allocation of physical machines to
+endpoints as it allows for the allocation of physical machines to
an admin. In this context, there is not already an ``owner`` or ``project_id``
to leverage to control access for the creation process, any project member
-does have the inherent prilege of requesting an allocation. That being said,
+does have the inherent privilege of requesting an allocation. That being said,
their allocation request will require physical nodes to be owned or leased
to the ``project_id`` through the ``node`` fields ``owner`` or ``lessee``.
@@ -188,7 +188,7 @@ and any new allocation being requested with a specific owner, if made in
the allocation.
Ultimately, an operational behavior difference exists between the ``owner``
-and ``lessee`` rights in terms of allocations exists. With the standard
+and ``lessee`` rights in terms of allocations. With the standard
access rights, ``lessee`` users are able to create allocations if they
own nodes which are not allocated or deployed, but they cannot reprovision
nodes when using only a ``member`` role. This limitation is not the case
@@ -203,8 +203,8 @@ for project-scoped users with the ``admin`` role.
and operators may change the default restriction via the
``baremetal:allocation:create_restricted`` policy.
-Pratical differences
---------------------
+Practical differences
+---------------------
Most users, upon implementing the use of ``system`` scoped authentication
should not notice a difference as long as their authentication token is
@@ -212,11 +212,11 @@ properly scoped to ``system`` and with the appropriate role for their
access level. For most users who used a ``baremetal`` project,
or other custom project via a custom policy file, along with a custom
role name such as ``baremetal_admin``, this will require changing
-the user to be a ``system`` scoped user with ``admin`` privilges.
+the user to be a ``system`` scoped user with ``admin`` privileges.
The most noticeable difference for API consumers is the HTTP 403 access
code is now mainly a HTTP 404 access code. The access concept has changed
-from "Does the user user broadly has access to the API?" to
+from "Does the user broadly have access to the API?" to
"Does user have access to the node, and then do they have access
to the specific resource?".
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index 5cd2ec751..8cf49392f 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -469,7 +469,8 @@ the conductor is actively working on something related to the node.
Often, this means there is an internal lock or ``reservation`` set on the node
and the conductor is downloading, uploading, or attempting to perform some
-sort of Input/Output operation.
+sort of Input/Output operation - see `Why does API return "Node is locked by
+host"?`_ for details.
In the case the conductor gets stuck, these operations should timeout,
but there are cases in operating systems where operations are blocked until
@@ -677,12 +678,16 @@ How do I resolve this?
Generally, you need to identify the port with the offending MAC address.
Example:
- openstack port list --mac-address 52:54:00:7c:c4:56
+.. code-block:: console
+
+ $ openstack port list --mac-address 52:54:00:7c:c4:56
From the command's output, you should be able to identify the ``id`` field.
Using that, you can delete the port. Example:
- openstack port delete <id>
+.. code-block:: console
+
+ $ openstack port delete <id>
.. warning::
Before deleting a port, you should always verify that it is no longer in
@@ -810,7 +815,9 @@ Example failure
A node in this state, when the ``network_interface`` was saved as ``neutron``,
yet the ``neutron`` interface is no longer enabled will fail basic state
-transition requests.:
+transition requests:
+
+.. code-block:: console
$ baremetal node manage 7164efca-37ab-1213-1112-b731cf795a5a
Could not find the following interface in the 'ironic.hardware.interfaces.network' entrypoint: neutron. Valid interfaces are ['flat']. (HTTP 400)
@@ -826,7 +833,9 @@ order of interfaces in the for the ``enabled_*_interfaces`` options.
Once the conductor has been restarted with the updated configuration, you
should now be able to update the interface using the ``baremetal node set``
command. In this example we use the ``network_interface`` as this is most
-commonly where it is encountered.:
+commonly where it is encountered:
+
+.. code-block:: console
$ baremetal node set $NAME_OR_UUID --network-interface flat
@@ -869,14 +878,98 @@ How do I resolve this?
This can be addressed a few different ways:
- * Use raw images, however these images can be substantially larger
- and require more data to be transmitted "over the wire".
- * Add more physical memory.
- * Add swap space.
- * Reduce concurrency, possibly via another conductor or changing the
- nova-compute.conf ``max_concurrent_builds`` parameter.
- * Or finally, adjust the ``[DEFAULT]minimum_required_memory`` parameter
- in your ironic.conf file. The default should be considered a "default
- of last resort" and you may need to reserve additional memory. You may
- also wish to adjust the ``[DEFAULT]minimum_memory_wait_retries`` and
- ``[DEFAULT]minimum_memory_wait_time`` parameters.
+* Use raw images, however these images can be substantially larger
+ and require more data to be transmitted "over the wire".
+* Add more physical memory.
+* Add swap space.
+* Reduce concurrency, possibly via another conductor or changing the
+ nova-compute.conf ``max_concurrent_builds`` parameter.
+* Or finally, adjust the ``[DEFAULT]minimum_required_memory`` parameter
+  in your ironic.conf file. The default should be considered a "default
+  of last resort" and you may need to reserve additional memory. You may
+  also wish to adjust the ``[DEFAULT]minimum_memory_wait_retries`` and
+  ``[DEFAULT]minimum_memory_wait_time`` parameters, as sketched below.
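+
+  For example, in ironic.conf (a sketch; the values are placeholders, not
+  recommendations):
+
+  .. code-block:: ini
+
+     [DEFAULT]
+     minimum_required_memory = 1024
+     minimum_memory_wait_retries = 6
+     minimum_memory_wait_time = 15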
+
+Why does API return "Node is locked by host"?
+=============================================
+
+This error usually manifests as HTTP error 409 on the client side:
+
+ Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 is locked by host 192.168.122.1,
+ please retry after the current operation is completed.
+
+It happens because an operation that modifies a node is requested while
+another such operation is running. The conflicting operation may be
+user-requested (e.g. a provisioning action) or related to internal processes
+(e.g. changing the power state during :doc:`power-sync`). The reported host
+name corresponds to the conductor instance that holds the lock.
+
+Normally, these errors are transient and safe to retry after a few seconds. If
+the lock is held for a significant time, these are the steps you can take.
+
+First of all, check the current ``provision_state`` of the node:
+
+``verifying``
+ means that the conductor is trying to access the node's BMC.
+ If it happens for minutes, it means that the BMC is either unreachable or
+ misbehaving. Double-check the information in ``driver_info``, especially
+ the BMC address and credentials.
+
+ If the access details seem correct, try resetting the BMC using, for
+ example, its web UI.
+
+``deploying``/``inspecting``/``cleaning``
+ means that the conductor is doing some active work. It may include
+ downloading or converting images, executing synchronous out-of-band deploy
+ or clean steps, etc. A node can stay in this state for minutes, depending
+ on various factors. Consult the conductor logs.
+
+``available``/``manageable``/``wait call-back``/``clean wait``
+ means that some background process is holding the lock. Most commonly it's
+ the power synchronization loop. Similarly to the ``verifying`` state,
+ it may mean that the BMC access is broken or too slow. The conductor logs
+  will provide insight into what is happening.
+
+To trace the process using conductor logs:
+
+#. Isolate the relevant log parts. Lock messages come from the
+ ``ironic.conductor.task_manager`` module. You can also check the
+ ``ironic.common.states`` module for any state transitions:
+
+ .. code-block:: console
+
+ $ grep -E '(ironic.conductor.task_manager|ironic.common.states|NodeLocked)' \
+ conductor.log > state.log
+
+#. Find the first instance of ``NodeLocked``. It may look like this (stripping
+ timestamps and request IDs here and below for readability)::
+
+ DEBUG ironic.conductor.task_manager [-] Attempting to get exclusive lock on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (for node update) __init__ /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:233
+ DEBUG ironic_lib.json_rpc.server [-] RPC error NodeLocked: Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 is locked by host 192.168.57.53, please retry after the current operation is completed. _handle_error /usr/lib/python3.6/site-packages/ironic_lib/json_rpc/server.py:179
+
+ The events right before this failure will provide you a clue on why the lock
+ is held.
+
+#. Find the last successful **exclusive** locking event before the failure, for
+ example::
+
+ DEBUG ironic.conductor.task_manager [-] Attempting to get exclusive lock on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (for provision action manage) __init__ /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:233
+ DEBUG ironic.conductor.task_manager [-] Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 successfully reserved for provision action manage (took 0.01 seconds) reserve_node /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:350
+ DEBUG ironic.common.states [-] Exiting old state 'enroll' in response to event 'manage' on_exit /usr/lib/python3.6/site-packages/ironic/common/states.py:307
+ DEBUG ironic.common.states [-] Entering new state 'verifying' in response to event 'manage' on_enter /usr/lib/python3.6/site-packages/ironic/common/states.py:313
+
+   This is your root cause: the lock is held because of the BMC credentials
+ verification.
+
+#. Find when the lock is released (if at all). The messages look like this::
+
+ DEBUG ironic.conductor.task_manager [-] Successfully released exclusive lock for provision action manage on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (lock was held 60.02 sec) release_resources /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:447
+
+ The message tells you the reason the lock was held (``for provision action
+ manage``) and the amount of time it was held (60.02 seconds, which is way
+ too much for accessing a BMC).
+
+Unfortunately, due to the way the conductor is designed, it is not possible to
+gracefully break a stuck lock held in ``*-ing`` states. As a last resort, you
+may need to restart the affected conductor. See `Why are my nodes stuck in a
+"-ing" state?`_.
diff --git a/doc/source/contributor/jobs-description.rst b/doc/source/contributor/jobs-description.rst
index ebcef767e..00fe074ca 100644
--- a/doc/source/contributor/jobs-description.rst
+++ b/doc/source/contributor/jobs-description.rst
@@ -47,14 +47,14 @@ The description of each jobs that runs in the CI when you submit a patch for
`pxe` boot and `snmp` driver.
Runs tempest tests that match the regex
`ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal.
- * - ironic-tempest-ipa-partition-pxe_ipmitool
- - Deploys Ironic in DevStack under Python3, configured to use dib
- ramdisk partition image with `pxe` boot and `ipmi` driver.
+ * - ironic-tempest-partition-bios-ipmi-pxe
+ - Deploys Ironic in DevStack, configured to use dib ramdisk, a partition
+ image, `pxe` boot in legacy mode and `ipmi` hardware type.
Runs tempest tests that match the regex
`ironic_tempest_plugin.tests.scenario` and deploy 1 virtual baremetal.
- * - ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- - Deploys Ironic in DevStack, configured to use dib ramdisk partition
- image with `uefi` boot and `ipmi` driver.
+ * - ironic-tempest-partition-uefi-ipmi-pxe
+ - Deploys Ironic in DevStack, configured to use dib ramdisk, a partition
+ image, `pxe` boot in UEFI mode and `ipmi` hardware type.
Runs tempest tests that match the regex
`ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual
baremetal.
diff --git a/doc/source/install/configure-glance-images.rst b/doc/source/install/configure-glance-images.rst
index 361a31a7b..7c7f2ee57 100644
--- a/doc/source/install/configure-glance-images.rst
+++ b/doc/source/install/configure-glance-images.rst
@@ -12,9 +12,30 @@ Add images to the Image service
and note the image UUIDs in the Image service for each one as it is
generated.
- For *partition images*:
+ - For *whole disk images* just upload the image:
- - Add the kernel and ramdisk images to the Image service:
+ .. code-block:: console
+
+ $ openstack image create my-whole-disk-image --public \
+ --disk-format qcow2 --container-format bare \
+ --file my-whole-disk-image.qcow2
+
+ .. warning::
+ The kernel/ramdisk pair must not be set for whole disk images,
+ otherwise they'll be mistaken for partition images.
+
+   - For *partition images* to be used only with *local boot* (the default),
+ the ``img_type`` property must be set:
+
+ .. code-block:: console
+
+ $ openstack image create my-image --public \
+ --disk-format qcow2 --container-format bare \
+ --property img_type=partition --file my-image.qcow2
+
+ - For *partition images* to be used with both *local* and *network* boot:
+
+ Add the kernel and ramdisk images to the Image service:
.. code-block:: console
@@ -30,7 +51,7 @@ Add images to the Image service
Store the image UUID obtained from the above step as ``MY_INITRD_UUID``.
- - Add the *my-image* to the Image service which is going to be the OS
+   Add the *my-image* to the Image service. This is going to be the OS
that the user is going to run. Also associate the above created
images with this OS image. These two operations can be done by
executing the following command:
@@ -42,19 +63,6 @@ Add images to the Image service
kernel_id=$MY_VMLINUZ_UUID --property \
ramdisk_id=$MY_INITRD_UUID --file my-image.qcow2
- For *whole disk images*, skip uploading and configuring kernel and ramdisk
- images completely, proceed directly to uploading the main image:
-
- .. code-block:: console
-
- $ openstack image create my-whole-disk-image --public \
- --disk-format qcow2 --container-format bare \
- --file my-whole-disk-image.qcow2
-
- .. warning::
- The kernel/initramfs pair must not be set for whole disk images,
- otherwise they'll be mistaken for partition images.
-
#. Build or download the deploy images
The deploy images are used initially for preparing the server (creating disk
diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst
index 75a9b8f39..b2c9e1da8 100644
--- a/doc/source/install/install.rst
+++ b/doc/source/install/install.rst
@@ -1,12 +1,33 @@
-.. _install:
-
Install and configure the Bare Metal service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the
-Bare Metal service, code-named ironic.
+Bare Metal service, code-named ironic, manually from packages on one of the
+three popular families of Linux distributions.
+
+Alternatively, you can use one of the numerous projects that install ironic.
+One of them is provided by the bare metal team:
+
+* `Bifrost <https://docs.openstack.org/bifrost/latest/>`_ installs ironic in
+ the standalone mode (without the rest of OpenStack).
+
+More installation projects are developed by other OpenStack teams:
+
+* `Kolla
+ <https://docs.openstack.org/kolla-ansible/latest/reference/bare-metal/ironic-guide.html>`_
+ can install ironic in containers as part of OpenStack.
+* OpenStack-Ansible has a `role to install ironic
+ <https://docs.openstack.org/openstack-ansible-os_ironic/latest/>`_.
+* TripleO uses ironic for provisioning bare metal nodes and can also be used
+ `to install ironic
+ <https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/baremetal_overcloud.html>`_.
+
+.. NOTE(dtantsur): add your favourite installation tool, but please link to the
+ **Ironic guide**, not to the generic page. If a separate Ironic guide does
+ not exist yet, create it first.
-Note that installation and configuration vary by distribution.
+Contents
+--------
.. toctree::
:maxdepth: 2
diff --git a/doc/source/user/creating-images.rst b/doc/source/user/creating-images.rst
index 6aeaaf529..efcd632a5 100644
--- a/doc/source/user/creating-images.rst
+++ b/doc/source/user/creating-images.rst
@@ -9,8 +9,8 @@ the end user. There are two types of user images:
*partition images*
contain only the contents of the root partition. Additionally, two more
- images are used together with them: an image with a kernel and with
- an initramfs.
+   images are used together with them when booting from the network: a kernel
+   image and an initramfs image.
.. warning::
To use partition images with local boot, Grub2 must be installed on
diff --git a/doc/source/user/deploy.rst b/doc/source/user/deploy.rst
index f462fa0e5..9b4609289 100644
--- a/doc/source/user/deploy.rst
+++ b/doc/source/user/deploy.rst
@@ -100,19 +100,18 @@ You need to specify image information in the node's ``instance_info``
$ cd /path/to/http/root
$ md5sum *.img > checksums
-* ``kernel``, ``ramdisk`` - HTTP(s) or file URLs of the kernel and
- initramfs of the target OS. Must be added **only** for partition images.
- Supports the same schemes as ``image_source``.
+* ``kernel``, ``ramdisk`` - HTTP(s) or file URLs of the kernel and initramfs of
+ the target OS. Must be added **only** for partition images and only if
+ network boot is required. Supports the same schemes as ``image_source``.
-An example for a partition image:
+An example for a partition image with local boot:
.. code-block:: shell
baremetal node set $NODE_UUID \
--instance-info image_source=http://image.server/my-image.qcow2 \
--instance-info image_checksum=1f9c0e1bad977a954ba40928c1e11f33 \
- --instance-info kernel=http://image.server/my-image.kernel \
- --instance-info ramdisk=http://image.server/my-image.initramfs \
+ --instance-info image_type=partition \
--instance-info root_gb=10
With a SHA256 hash:
@@ -123,6 +122,17 @@ With a SHA256 hash:
--instance-info image_source=http://image.server/my-image.qcow2 \
--instance-info image_os_hash_algo=sha256 \
--instance-info image_os_hash_value=a64dd95e0c48e61ed741ff026d8c89ca38a51f3799955097c5123b1705ef13d4 \
+ --instance-info image_type=partition \
+ --instance-info root_gb=10
+
+If you use network boot (or Ironic before Yoga), two more fields must be set:
+
+.. code-block:: shell
+
+ baremetal node set $NODE_UUID \
+ --instance-info image_source=http://image.server/my-image.qcow2 \
+ --instance-info image_checksum=1f9c0e1bad977a954ba40928c1e11f33 \
+ --instance-info image_type=partition \
--instance-info kernel=http://image.server/my-image.kernel \
--instance-info ramdisk=http://image.server/my-image.initramfs \
--instance-info root_gb=10
diff --git a/driver-requirements.txt b/driver-requirements.txt
index 5239fe73c..da312468e 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -4,10 +4,10 @@
# python projects they should package as optional dependencies for Ironic.
# These are available on pypi
-proliantutils>=2.11.0
+proliantutils>=2.13.0
pysnmp>=4.3.0,<5.0.0
python-scciclient>=0.8.0
-python-dracclient>=5.1.0,<8.0.0
+python-dracclient>=5.1.0,<9.0.0
python-xclarityclient>=0.1.6
# Ansible-deploy interface
diff --git a/ironic/api/controllers/v1/allocation.py b/ironic/api/controllers/v1/allocation.py
index cff380409..7884df1fa 100644
--- a/ironic/api/controllers/v1/allocation.py
+++ b/ironic/api/controllers/v1/allocation.py
@@ -104,7 +104,7 @@ def allocation_sanitize(allocation, fields):
api_utils.sanitize_dict(allocation, fields)
-def list_convert_with_links(rpc_allocations, limit, url=None, fields=None,
+def list_convert_with_links(rpc_allocations, limit, url, fields=None,
**kwargs):
return collection.list_convert_with_links(
items=[convert_with_links(p, fields=fields,
@@ -136,7 +136,7 @@ class AllocationsController(pecan.rest.RestController):
def _get_allocations_collection(self, node_ident=None, resource_class=None,
state=None, owner=None, marker=None,
limit=None, sort_key='id', sort_dir='asc',
- resource_url=None, fields=None,
+ resource_url='allocations', fields=None,
parent_node=None):
"""Return allocations collection.
diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py
index 9c280fa58..e04a37557 100644
--- a/ironic/api/controllers/v1/chassis.py
+++ b/ironic/api/controllers/v1/chassis.py
@@ -78,7 +78,7 @@ def convert_with_links(rpc_chassis, fields=None, sanitize=True):
return chassis
-def list_convert_with_links(rpc_chassis_list, limit, url=None, fields=None,
+def list_convert_with_links(rpc_chassis_list, limit, url, fields=None,
**kwargs):
return collection.list_convert_with_links(
items=[convert_with_links(ch, fields=fields,
@@ -164,7 +164,8 @@ class ChassisController(rest.RestController):
DEFAULT_RETURN_FIELDS)
return self._get_chassis_collection(marker, limit, sort_key, sort_dir,
- fields=fields, detail=detail)
+ fields=fields, detail=detail,
+ resource_url='chassis')
@METRICS.timer('ChassisController.detail')
@method.expose()
@@ -188,9 +189,8 @@ class ChassisController(rest.RestController):
if parent != "chassis":
raise exception.HTTPNotFound()
- resource_url = '/'.join(['chassis', 'detail'])
return self._get_chassis_collection(marker, limit, sort_key, sort_dir,
- resource_url)
+ resource_url='chassis/detail')
@METRICS.timer('ChassisController.get_one')
@method.expose()
diff --git a/ironic/api/controllers/v1/collection.py b/ironic/api/controllers/v1/collection.py
index cdcf13712..342efb873 100644
--- a/ironic/api/controllers/v1/collection.py
+++ b/ironic/api/controllers/v1/collection.py
@@ -22,7 +22,7 @@ def has_next(collection, limit):
return len(collection) and len(collection) == limit
-def list_convert_with_links(items, item_name, limit, url=None, fields=None,
+def list_convert_with_links(items, item_name, limit, url, fields=None,
sanitize_func=None, key_field='uuid',
sanitizer_args=None, **kwargs):
"""Build a collection dict including the next link for paging support.
@@ -49,6 +49,10 @@ def list_convert_with_links(items, item_name, limit, url=None, fields=None,
:returns:
A dict containing ``item_name`` and ``next`` values
"""
+ assert url, "BUG: collections require a base URL"
+ assert limit is None or isinstance(limit, int), \
+ f"BUG: limit must be None or int, got {type(limit)}"
+
items_dict = {
item_name: items
}
@@ -68,7 +72,7 @@ def list_convert_with_links(items, item_name, limit, url=None, fields=None,
return items_dict
-def get_next(collection, limit, url=None, key_field='uuid', **kwargs):
+def get_next(collection, limit, url, key_field='uuid', **kwargs):
"""Return a link to the next subset of the collection."""
if not has_next(collection, limit):
return None
diff --git a/ironic/api/controllers/v1/conductor.py b/ironic/api/controllers/v1/conductor.py
index 61cbba78a..d421e835e 100644
--- a/ironic/api/controllers/v1/conductor.py
+++ b/ironic/api/controllers/v1/conductor.py
@@ -71,7 +71,7 @@ class ConductorsController(rest.RestController):
invalid_sort_key_list = ['alive', 'drivers']
def _get_conductors_collection(self, marker, limit, sort_key, sort_dir,
- resource_url=None, fields=None,
+ resource_url='conductors', fields=None,
detail=None):
limit = api_utils.validate_limit(limit)
diff --git a/ironic/api/controllers/v1/deploy_template.py b/ironic/api/controllers/v1/deploy_template.py
index e54b0d63b..dcf7122e0 100644
--- a/ironic/api/controllers/v1/deploy_template.py
+++ b/ironic/api/controllers/v1/deploy_template.py
@@ -140,6 +140,7 @@ def list_convert_with_links(rpc_templates, limit, fields=None, **kwargs):
items=[convert_with_links(t, fields=fields, sanitize=False)
for t in rpc_templates],
item_name='deploy_templates',
+ url='deploy_templates',
limit=limit,
fields=fields,
sanitize_func=template_sanitize,
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index bfe967084..f182e2fd2 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -1650,8 +1650,7 @@ def _node_sanitize_extended(node, node_keys, target_dict, cdict):
'driver_internal_info permission. **'}
-def node_list_convert_with_links(nodes, limit, url=None, fields=None,
- **kwargs):
+def node_list_convert_with_links(nodes, limit, url, fields=None, **kwargs):
cdict = api.request.context.to_policy_values()
target_dict = dict(cdict)
sanitizer_args = {
@@ -1890,25 +1889,19 @@ class NodeHistoryController(rest.RestController):
@METRICS.timer('NodeHistoryController.get_all')
@method.expose()
- @args.validate(details=args.boolean, marker=args.uuid, limit=args.integer)
- def get_all(self, **kwargs):
+ @args.validate(detail=args.boolean, marker=args.uuid, limit=args.integer)
+ def get_all(self, detail=False, marker=None, limit=None):
"""List node history."""
node = api_utils.check_node_policy_and_retrieve(
'baremetal:node:history:get', self.node_ident)
- if kwargs.get('detail'):
- detail = True
- fields = self.detail_fields
- else:
- detail = False
- fields = self.standard_fields
+ fields = self.detail_fields if detail else self.standard_fields
marker_obj = None
- marker = kwargs.get('marker')
if marker:
marker_obj = objects.NodeHistory.get_by_uuid(api.request.context,
marker)
- limit = kwargs.get('limit')
+ limit = api_utils.validate_limit(limit)
events = objects.NodeHistory.list_by_node_id(api.request.context,
node.id,
@@ -1921,6 +1914,7 @@ class NodeHistoryController(rest.RestController):
node.uuid, event, detail=detail) for event in events
],
item_name='history',
+ url=f'nodes/{self.node_ident}/history',
fields=fields,
marker=marker_obj,
limit=limit,
@@ -2288,7 +2282,6 @@ class NodesController(rest.RestController):
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
- resource_url = 'nodes'
extra_args = {'description_contains': description_contains}
return self._get_nodes_collection(chassis_uuid, instance_uuid,
associated, maintenance, retired,
@@ -2296,7 +2289,7 @@ class NodesController(rest.RestController):
limit, sort_key, sort_dir,
driver=driver,
resource_class=resource_class,
- resource_url=resource_url,
+ resource_url='nodes',
fields=fields, fault=fault,
conductor_group=conductor_group,
detail=detail,
@@ -2379,7 +2372,6 @@ class NodesController(rest.RestController):
api_utils.check_allow_filter_by_conductor(conductor)
- resource_url = '/'.join(['nodes', 'detail'])
extra_args = {'description_contains': description_contains}
return self._get_nodes_collection(chassis_uuid, instance_uuid,
associated, maintenance, retired,
@@ -2387,7 +2379,7 @@ class NodesController(rest.RestController):
limit, sort_key, sort_dir,
driver=driver,
resource_class=resource_class,
- resource_url=resource_url,
+ resource_url='nodes/detail',
fault=fault,
conductor_group=conductor_group,
conductor=conductor,
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index 0658fbf3f..adc21ebc2 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -160,7 +160,7 @@ def port_sanitize(port, fields=None):
api_utils.sanitize_dict(port, fields)
-def list_convert_with_links(rpc_ports, limit, url=None, fields=None, **kwargs):
+def list_convert_with_links(rpc_ports, limit, url, fields=None, **kwargs):
ports = []
for rpc_port in rpc_ports:
try:
@@ -407,10 +407,9 @@ class PortsController(rest.RestController):
and not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
- resource_url = 'ports'
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
- sort_dir, resource_url=resource_url,
+ sort_dir, resource_url='ports',
fields=fields, detail=detail,
project=project)
@@ -466,10 +465,10 @@ class PortsController(rest.RestController):
if parent != "ports":
raise exception.HTTPNotFound()
- resource_url = '/'.join(['ports', 'detail'])
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
- sort_dir, resource_url,
+ sort_dir,
+ resource_url='ports/detail',
project=project)
@METRICS.timer('PortsController.get_one')
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index 7900c4683..6c68e07ba 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -113,8 +113,7 @@ def convert_with_links(rpc_portgroup, fields=None, sanitize=True):
return portgroup
-def list_convert_with_links(rpc_portgroups, limit, url=None, fields=None,
- **kwargs):
+def list_convert_with_links(rpc_portgroups, limit, url, fields=None, **kwargs):
return collection.list_convert_with_links(
items=[convert_with_links(p, fields=fields, sanitize=False)
for p in rpc_portgroups],
@@ -283,12 +282,11 @@ class PortgroupsController(pecan.rest.RestController):
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
- resource_url = 'portgroups'
return self._get_portgroups_collection(node, address,
marker, limit,
sort_key, sort_dir,
fields=fields,
- resource_url=resource_url,
+ resource_url='portgroups',
detail=detail,
project=project)
@@ -332,10 +330,9 @@ class PortgroupsController(pecan.rest.RestController):
if parent != "portgroups":
raise exception.HTTPNotFound()
- resource_url = '/'.join(['portgroups', 'detail'])
return self._get_portgroups_collection(
node, address, marker, limit, sort_key, sort_dir,
- resource_url=resource_url, project=project)
+ resource_url='portgroups/detail', project=project)
@METRICS.timer('PortgroupsController.get_one')
@method.expose()
diff --git a/ironic/api/controllers/v1/volume_connector.py b/ironic/api/controllers/v1/volume_connector.py
index 100822029..dd003e867 100644
--- a/ironic/api/controllers/v1/volume_connector.py
+++ b/ironic/api/controllers/v1/volume_connector.py
@@ -83,7 +83,7 @@ def convert_with_links(rpc_connector, fields=None, sanitize=True):
return connector
-def list_convert_with_links(rpc_connectors, limit, url=None, fields=None,
+def list_convert_with_links(rpc_connectors, limit, url, fields=None,
detail=None, **kwargs):
if detail:
kwargs['detail'] = detail
diff --git a/ironic/api/controllers/v1/volume_target.py b/ironic/api/controllers/v1/volume_target.py
index d98f461ed..2fe1afb03 100644
--- a/ironic/api/controllers/v1/volume_target.py
+++ b/ironic/api/controllers/v1/volume_target.py
@@ -95,7 +95,7 @@ def convert_with_links(rpc_target, fields=None, sanitize=True):
return target
-def list_convert_with_links(rpc_targets, limit, url=None, fields=None,
+def list_convert_with_links(rpc_targets, limit, url, fields=None,
detail=None, **kwargs):
if detail:
kwargs['detail'] = detail
diff --git a/ironic/cmd/api.py b/ironic/cmd/api.py
index 2323c4b09..f75aa9ee7 100644
--- a/ironic/cmd/api.py
+++ b/ironic/cmd/api.py
@@ -39,7 +39,7 @@ def main():
launcher = ironic_service.process_launcher()
server = wsgi_service.WSGIService('ironic_api', CONF.api.enable_ssl_api)
launcher.launch_service(server, workers=server.workers)
- launcher.wait()
+ sys.exit(launcher.wait())
if __name__ == '__main__':
diff --git a/ironic/cmd/conductor.py b/ironic/cmd/conductor.py
index 843185890..a932cb22f 100644
--- a/ironic/cmd/conductor.py
+++ b/ironic/cmd/conductor.py
@@ -67,7 +67,13 @@ def main():
issue_startup_warnings(CONF)
launcher = service.launch(CONF, mgr, restart_method='mutate')
- launcher.wait()
+
+ # NOTE(dtantsur): handling start-up failures before launcher.wait() helps
+ # notify systemd about them. Otherwise the launcher will report successful
+ # service start-up before checking the threads.
+ mgr.wait_for_start()
+
+ sys.exit(launcher.wait())
if __name__ == '__main__':
diff --git a/ironic/cmd/singleprocess.py b/ironic/cmd/singleprocess.py
index ea2e01365..675bd1bc2 100644
--- a/ironic/cmd/singleprocess.py
+++ b/ironic/cmd/singleprocess.py
@@ -49,4 +49,9 @@ def main():
wsgi = wsgi_service.WSGIService('ironic_api', CONF.api.enable_ssl_api)
launcher.launch_service(wsgi)
- launcher.wait()
+ # NOTE(dtantsur): handling start-up failures before launcher.wait() helps
+ # notify systemd about them. Otherwise the launcher will report successful
+ # service start-up before checking the threads.
+ mgr.wait_for_start()
+
+ sys.exit(launcher.wait())
diff --git a/ironic/common/glance_service/image_service.py b/ironic/common/glance_service/image_service.py
index 66ac693f8..0a32eaf0a 100644
--- a/ironic/common/glance_service/image_service.py
+++ b/ironic/common/glance_service/image_service.py
@@ -303,15 +303,20 @@ class GlanceImageService(object):
'but it was not found in the service catalog. You must '
'provide "swift_endpoint_url" as a config option.'))
+ swift_account_prefix = CONF.glance.swift_account_prefix
+ if swift_account_prefix and not swift_account_prefix.endswith('_'):
+ swift_account_prefix = '%s_' % swift_account_prefix
+
# Strip /v1/AUTH_%(tenant_id)s, if present
- endpoint_url = re.sub('/v1/AUTH_[^/]+/?$', '', endpoint_url)
+ endpoint_url = re.sub('/v1/%s[^/]+/?$' % swift_account_prefix, '',
+ endpoint_url)
key = CONF.glance.swift_temp_url_key
account = CONF.glance.swift_account
if not account:
swift_session = swift.get_swift_session()
auth_ref = swift_session.auth.get_auth_ref(swift_session)
- account = 'AUTH_%s' % auth_ref.project_id
+ account = '%s%s' % (swift_account_prefix, auth_ref.project_id)
if not key:
swift_api = swift.SwiftAPI()
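A minimal sketch of how the new swift_account_prefix handling changes the endpoint normalization above; the prefix value and URL here are hypothetical examples, not from a real deployment:

import re

swift_account_prefix = 'AUTH'          # stands in for CONF.glance.swift_account_prefix
if swift_account_prefix and not swift_account_prefix.endswith('_'):
    swift_account_prefix = '%s_' % swift_account_prefix

endpoint_url = 'https://swift.example.com/v1/AUTH_123abc/'
# Strips a trailing /v1/<prefix><project_id>/ component, if present.
print(re.sub('/v1/%s[^/]+/?$' % swift_account_prefix, '', endpoint_url))
# -> https://swift.example.com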
diff --git a/ironic/common/images.py b/ironic/common/images.py
index cf51d723c..9f771ca29 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -120,7 +120,8 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
# NOTE: FAT filesystem label can be up to 11 characters long.
# TODO(sbaker): use ironic_lib.utils.mkfs when rootwrap has been
# removed
- utils.execute('mkfs', '-t', 'vfat', '-n', 'ir-vfd-de', output_file)
+ utils.execute('mkfs', '-t', 'vfat', '-n',
+ 'ir-vfd-dev', output_file)
except processutils.ProcessExecutionError as e:
raise exception.ImageCreationFailed(image_type='vfat', error=e)
@@ -135,10 +136,16 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
file_contents = '\n'.join(params_list)
utils.write_to_file(parameters_file, file_contents)
+ file_list = os.listdir(tmpdir)
+
+ if not file_list:
+ return
+
+ file_list = [os.path.join(tmpdir, item) for item in file_list]
+
# use mtools to copy the files into the image in a single
# operation
- utils.execute('mcopy', '-s', '%s/*' % tmpdir,
- '-i', output_file, '::')
+ utils.execute('mcopy', '-s', *file_list, '-i', output_file, '::')
except Exception as e:
LOG.exception("vfat image creation failed. Error: %s", e)
@@ -570,6 +577,11 @@ def create_boot_iso(context, output_filename, kernel_href,
kernel_params=params, inject_files=inject_files)
+IMAGE_TYPE_PARTITION = 'partition'
+IMAGE_TYPE_WHOLE_DISK = 'whole-disk'
+VALID_IMAGE_TYPES = frozenset((IMAGE_TYPE_PARTITION, IMAGE_TYPE_WHOLE_DISK))
+
+
def is_whole_disk_image(ctx, instance_info):
"""Find out if the image is a partition image or a whole disk image.
@@ -583,12 +595,22 @@ def is_whole_disk_image(ctx, instance_info):
if not image_source:
return
+ image_type = instance_info.get('image_type')
+ if image_type:
+ # This logic reflects the fact that whole disk images are the default
+ return image_type != IMAGE_TYPE_PARTITION
+
is_whole_disk_image = False
if glance_utils.is_glance_image(image_source):
try:
iproperties = get_image_properties(ctx, image_source)
except Exception:
return
+
+ image_type = iproperties.get('img_type')
+ if image_type:
+ return image_type != IMAGE_TYPE_PARTITION
+
is_whole_disk_image = (not iproperties.get('kernel_id')
and not iproperties.get('ramdisk_id'))
else:
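A hedged illustration of the new precedence in is_whole_disk_image(): an explicit image_type in instance_info short-circuits the Glance property lookup, and whole-disk remains the default. The instance_info dicts below are made-up examples:

IMAGE_TYPE_PARTITION = 'partition'

def looks_whole_disk(instance_info):
    # Mirrors the first branch added above: an explicit image_type wins.
    image_type = instance_info.get('image_type')
    if image_type:
        return image_type != IMAGE_TYPE_PARTITION
    return None  # fall through to Glance / kernel+ramdisk detection

print(looks_whole_disk({'image_type': 'partition'}))   # False
print(looks_whole_disk({'image_type': 'whole-disk'}))  # True
print(looks_whole_disk({}))                            # None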
diff --git a/ironic/common/kickstart_utils.py b/ironic/common/kickstart_utils.py
index 519cb5326..433cf2390 100644
--- a/ironic/common/kickstart_utils.py
+++ b/ironic/common/kickstart_utils.py
@@ -55,7 +55,7 @@ def _get_config_drive_dict_from_iso(
iso_path=iso_file_path, outfp=b_buf
)
b_buf.seek(0)
- content = b"\n".join(b_buf.readlines()).decode('utf-8')
+ content = b"".join(b_buf.readlines()).decode('utf-8')
drive_dict[target_file_path] = content
@@ -113,8 +113,7 @@ def _fetch_config_drive_from_url(url):
"Can't download the configdrive content from '%(url)s'. "
"Reason: %(reason)s" %
{'url': url, 'reason': e})
- config_drive_iso = decode_and_extract_config_drive_iso(config_drive)
- return read_iso9600_config_drive(config_drive_iso)
+ return config_drive
def _write_config_drive_content(content, file_path):
@@ -152,10 +151,15 @@ def prepare_config_drive(task,
if not config_drive:
return ks_config_drive
- if not isinstance(config_drive, dict) and \
- ironic_utils.is_http_url(config_drive):
+ if ironic_utils.is_http_url(config_drive):
config_drive = _fetch_config_drive_from_url(config_drive)
+ if not isinstance(config_drive, dict):
+ # The config drive is an ISO 9660 image, gzipped and base-64-encoded.
+ # Convert it to a dict.
+ config_drive_iso = decode_and_extract_config_drive_iso(config_drive)
+ config_drive = read_iso9600_config_drive(config_drive_iso)
+
for key in sorted(config_drive.keys()):
target_path = os.path.join(config_drive_path, key)
ks_config_drive += _write_config_drive_content(
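The payload handed over for the config drive is a base-64 string wrapping a gzipped ISO 9660 image. A self-contained sketch of just the decode step (a hedged stand-in for what decode_and_extract_config_drive_iso does before the ISO contents are read into a dict):

import base64
import gzip
import io

def decode_config_drive(config_drive_b64):
    """Turn the base-64, gzipped config drive payload into raw ISO bytes."""
    compressed = base64.b64decode(config_drive_b64)
    with gzip.GzipFile(fileobj=io.BytesIO(compressed)) as gz:
        return gz.read()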
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index 94378fb07..d613aec06 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -1862,7 +1862,7 @@ def authorize(rule, target, creds, *args, **kwargs):
return enforcer.authorize(rule, target, creds, do_raise=True,
*args, **kwargs)
except policy.PolicyNotAuthorized as e:
- LOG.error('Rejecting authorzation: %(error)s',
+ LOG.error('Rejecting authorization: %(error)s',
{'error': e})
raise exception.HTTPForbidden(resource=rule)
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index e87f73185..42d860f8d 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -18,6 +18,7 @@ import copy
import os
import shutil
import tempfile
+from urllib import parse as urlparse
from ironic_lib import utils as ironic_utils
import jinja2
@@ -667,47 +668,68 @@ def get_instance_image_info(task, ipxe_enabled=False):
return image_info
- labels = ('kernel', 'ramdisk')
+ image_properties = None
d_info = deploy_utils.get_image_instance_info(node)
+
+ def _get_image_properties():
+ nonlocal image_properties
+ if not image_properties:
+ glance_service = service.GlanceImageService(context=ctx)
+ image_properties = glance_service.show(
+ d_info['image_source'])['properties']
+
+ labels = ('kernel', 'ramdisk')
if not (i_info.get('kernel') and i_info.get('ramdisk')):
- glance_service = service.GlanceImageService(context=ctx)
- iproperties = glance_service.show(d_info['image_source'])['properties']
+ # NOTE(rloo): If kernel and ramdisk are not both specified in
+ # instance_info, we ignore any partial values and use the ones
+ # associated with the image, which we assume have been set.
+ _get_image_properties()
for label in labels:
- i_info[label] = str(iproperties[label + '_id'])
+ i_info[label] = str(image_properties[label + '_id'])
node.instance_info = i_info
node.save()
anaconda_labels = ()
if deploy_utils.get_boot_option(node) == 'kickstart':
- # stage2 - Installer stage2 squashfs image
- # ks_template - Anaconda kickstart template
+ # stage2: installer stage2 squashfs image
+ # ks_template: anaconda kickstart template
# ks_cfg - rendered ks_template
anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
- if not (i_info.get('stage2') and i_info.get('ks_template')):
- iproperties = glance_service.show(
- d_info['image_source']
- )['properties']
- for label in anaconda_labels:
- # ks_template is an optional property on the image
- if (label == 'ks_template'
- and not iproperties.get('ks_template')):
- i_info[label] = CONF.anaconda.default_ks_template
- elif label == 'ks_cfg':
- i_info[label] = ''
- elif label == 'stage2' and 'stage2_id' not in iproperties:
- msg = ("stage2_id property missing on the image. "
- "The anaconda deploy interface requires stage2_id "
- "property to be associated with the os image. ")
+ # NOTE(rloo): We cache the stage2 & ks_template values so that
+ # later user changes do not affect them and they do not have
+ # to be re-computed.
+ if not node.driver_internal_info.get('stage2'):
+ if i_info.get('stage2'):
+ node.set_driver_internal_info('stage2', i_info['stage2'])
+ else:
+ _get_image_properties()
+ if 'stage2_id' not in image_properties:
+ msg = (_("'stage2_id' is missing from the properties of "
+ "the OS image %s. The anaconda deploy interface "
+ "requires this to be set with the OS image or "
+ "in instance_info['stage2']. ") %
+ d_info['image_source'])
raise exception.ImageUnacceptable(msg)
else:
- i_info[label] = str(iproperties['stage2_id'])
-
- node.instance_info = i_info
+ node.set_driver_internal_info(
+ 'stage2', str(image_properties['stage2_id']))
+ if i_info.get('ks_template'):
+ node.set_driver_internal_info('ks_template',
+ i_info['ks_template'])
+ else:
+ _get_image_properties()
+ # ks_template is an optional property on the image
+ if 'ks_template' not in image_properties:
+ node.set_driver_internal_info(
+ 'ks_template', CONF.anaconda.default_ks_template)
+ else:
+ node.set_driver_internal_info(
+ 'ks_template', str(image_properties['ks_template']))
node.save()
for label in labels + anaconda_labels:
image_info[label] = (
- i_info[label],
+ i_info.get(label) or node.driver_internal_info.get(label, ''),
get_file_path_from_label(node.uuid, root_dir, label)
)
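The _get_image_properties helper above is a small lazy-loading pattern: the Glance call happens at most once, and only if some branch actually needs the properties. A generic, runnable sketch of the same pattern, with expensive_lookup standing in for the Glance show() call:

def make_handler(expensive_lookup):
    cached = None

    def get_properties():
        # The expensive call happens at most once, on first use.
        nonlocal cached
        if cached is None:
            cached = expensive_lookup()
        return cached

    return get_properties

calls = []
get_props = make_handler(lambda: calls.append(1) or {'kernel_id': 'abc'})
get_props(); get_props()
print(len(calls))  # 1 -- the lookup ran only once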
@@ -744,6 +766,7 @@ def build_deploy_pxe_options(task, pxe_info, mode='deploy',
node = task.node
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
+ initrd_filename = ramdisk_label
for label, option in ((kernel_label, 'deployment_aki_path'),
(ramdisk_label, 'deployment_ari_path')):
if ipxe_enabled:
@@ -752,6 +775,10 @@ def build_deploy_pxe_options(task, pxe_info, mode='deploy',
and service_utils.is_glance_image(image_href)):
pxe_opts[option] = images.get_temp_url_for_glance_image(
task.context, image_href)
+ if label == ramdisk_label:
+ path = urlparse.urlparse(pxe_opts[option]).path.strip('/')
+ if path:
+ initrd_filename = path.split('/')[-1]
else:
pxe_opts[option] = '/'.join([CONF.deploy.http_url, node.uuid,
label])
@@ -759,7 +786,7 @@ def build_deploy_pxe_options(task, pxe_info, mode='deploy',
pxe_opts[option] = os.path.relpath(pxe_info[label][1],
CONF.pxe.tftp_root)
if ipxe_enabled:
- pxe_opts['initrd_filename'] = ramdisk_label
+ pxe_opts['initrd_filename'] = initrd_filename
return pxe_opts
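The iPXE initrd filename is now derived from the last path segment of the Glance temporary URL rather than hard-coded to the ramdisk label. A small sketch with a made-up temp URL:

from urllib import parse as urlparse

temp_url = ('https://swift.example.com/v1/AUTH_123/glance/'
            'deploy_ramdisk?temp_url_sig=abc&temp_url_expires=123')
path = urlparse.urlparse(temp_url).path.strip('/')
# Fall back to the ramdisk label (here a placeholder) if the path is empty.
initrd_filename = path.split('/')[-1] if path else 'deploy_ramdisk'
print(initrd_filename)  # deploy_ramdisk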
@@ -941,6 +968,7 @@ def build_kickstart_config_options(task):
node.uuid
)
params['heartbeat_url'] = heartbeat_url
+ params['config_drive'] = ks_utils.prepare_config_drive(task)
return {'ks_options': params}
@@ -1051,6 +1079,7 @@ def validate_kickstart_template(ks_template):
"""
ks_options = {'liveimg_url': 'fake_image_url',
'agent_token': 'fake_token',
+ 'config_drive': '',
'heartbeat_url': 'fake_heartbeat_url'}
params = {'ks_options': ks_options}
try:
@@ -1090,16 +1119,17 @@ def validate_kickstart_file(ks_cfg):
return
with tempfile.NamedTemporaryFile(
- dir=CONF.tempdir, suffix='.cfg') as ks_file:
- ks_file.writelines(ks_cfg)
+ dir=CONF.tempdir, suffix='.cfg', mode='wt') as ks_file:
+ ks_file.write(ks_cfg)
+ ks_file.flush()
try:
- result = utils.execute(
+ utils.execute(
'ksvalidator', ks_file.name, check_on_exit=[0], attempts=1
)
- except processutils.ProcessExecutionError:
- msg = _(("The kickstart file generated does not pass validation. "
- "The ksvalidator tool returned following error(s): %s") %
- (result))
+ except processutils.ProcessExecutionError as e:
+ msg = (_("The kickstart file generated does not pass validation. "
+ "The ksvalidator tool returned the following error: %s") %
+ e)
raise exception.InvalidKickstartFile(msg)
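The tempfile changes above matter because NamedTemporaryFile defaults to binary mode and buffers writes: without mode='wt' writing a str would raise TypeError, and without flush() ksvalidator could see a partially written file. A minimal demonstration of the pattern, using 'cat' as a stand-in for the external validator:

import subprocess
import tempfile

ks_cfg = "autopart\n"
with tempfile.NamedTemporaryFile(suffix='.cfg', mode='wt') as ks_file:
    ks_file.write(ks_cfg)
    ks_file.flush()  # make the content visible to the external process
    subprocess.run(['cat', ks_file.name], check=True)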
@@ -1168,9 +1198,6 @@ def prepare_instance_kickstart_config(task, image_info, anaconda_boot=False):
ks_options = build_kickstart_config_options(task)
kickstart_template = image_info['ks_template'][1]
ks_cfg = utils.render_template(kickstart_template, ks_options)
- ks_config_drive = ks_utils.prepare_config_drive(task)
- if ks_config_drive:
- ks_cfg = ks_cfg + ks_config_drive
utils.write_to_file(image_info['ks_cfg'][1], ks_cfg,
CONF.pxe.file_permission)
@@ -1197,17 +1224,14 @@ def cache_ramdisk_kernel(task, pxe_info, ipxe_enabled=False):
else:
path = os.path.join(CONF.pxe.tftp_root, node.uuid)
ensure_tree(path)
- # anconda deploy will have 'stage2' as one of the labels in pxe_info dict
- if 'stage2' in pxe_info.keys():
- # stage2 will be stored in ipxe http directory. So make sure they
- # exist.
- ensure_tree(
- get_file_path_from_label(
- node.uuid,
- CONF.deploy.http_root,
- 'stage2'
- )
- )
+ # anaconda deploy will have 'stage2' as one of the labels in pxe_info dict
+ if 'stage2' in pxe_info:
+ # stage2 will be stored in ipxe http directory so make sure the
+ # directory exists.
+ file_path = get_file_path_from_label(node.uuid,
+ CONF.deploy.http_root,
+ 'stage2')
+ ensure_tree(os.path.dirname(file_path))
# ks_cfg is rendered later by the driver using ks_template. It cannot
# be fetched and cached.
t_pxe_info.pop('ks_cfg')
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index acb630f78..889fa77fd 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -390,13 +390,53 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
+ '19.0': {
+ 'api': '1.78',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.36'],
+ 'NodeHistory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.10'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
+ '20.0': {
+ 'api': '1.78',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.36'],
+ 'NodeHistory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.10'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
'master': {
'api': '1.78',
'rpc': '1.55',
'objects': {
'Allocation': ['1.1'],
'BIOSSetting': ['1.1'],
- 'Node': ['1.36', '1.35'],
+ 'Node': ['1.36'],
'NodeHistory': ['1.0'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
diff --git a/ironic/common/rpc_service.py b/ironic/common/rpc_service.py
index 78379c981..b0eec7758 100644
--- a/ironic/common/rpc_service.py
+++ b/ironic/common/rpc_service.py
@@ -15,6 +15,8 @@
# under the License.
import signal
+import sys
+import time
from ironic_lib.json_rpc import server as json_rpc
from oslo_config import cfg
@@ -42,9 +44,29 @@ class RPCService(service.Service):
self.topic = self.manager.topic
self.rpcserver = None
self.deregister = True
+ self._failure = None
+ self._started = False
+
+ def wait_for_start(self):
+ while not self._started and not self._failure:
+ time.sleep(0.1)
+ if self._failure:
+ LOG.critical(self._failure)
+ sys.exit(self._failure)
def start(self):
+ self._failure = None
+ self._started = False
super(RPCService, self).start()
+ try:
+ self._real_start()
+ except Exception as exc:
+ self._failure = f"{exc.__class__.__name__}: {exc}"
+ raise
+ else:
+ self._started = True
+
+ def _real_start(self):
admin_context = context.get_admin_context()
serializer = objects_base.IronicObjectSerializer(is_server=True)
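This handshake lets the main thread distinguish "service thread started" from "service thread died during start-up", which is what makes the systemd notification accurate. A stripped-down sketch of the same pattern, using a plain thread instead of oslo.service:

import sys
import threading
import time

class Service:
    def __init__(self):
        self._failure = None
        self._started = False

    def start(self):
        try:
            self._real_start()
        except Exception as exc:
            self._failure = f"{exc.__class__.__name__}: {exc}"
            raise
        else:
            self._started = True

    def _real_start(self):
        pass  # set up the RPC server, heartbeats, etc.

    def wait_for_start(self):
        while not self._started and not self._failure:
            time.sleep(0.1)
        if self._failure:
            sys.exit(self._failure)  # non-zero exit; systemd sees the failure

svc = Service()
threading.Thread(target=svc.start, daemon=True).start()
svc.wait_for_start()
print("service is up")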
diff --git a/ironic/common/wsgi_service.py b/ironic/common/wsgi_service.py
index e7bbe9dcd..abfe4b1f2 100644
--- a/ironic/common/wsgi_service.py
+++ b/ironic/common/wsgi_service.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import socket
+
+from ironic_lib import utils as il_utils
from oslo_concurrency import processutils
from oslo_service import service
from oslo_service import wsgi
@@ -46,10 +49,18 @@ class WSGIService(service.ServiceBase):
_("api_workers value of %d is invalid, "
"must be greater than 0.") % self.workers)
- self.server = wsgi.Server(CONF, name, self.app,
- host=CONF.api.host_ip,
- port=CONF.api.port,
- use_ssl=use_ssl)
+ if CONF.api.unix_socket:
+ il_utils.unlink_without_raise(CONF.api.unix_socket)
+ self.server = wsgi.Server(CONF, name, self.app,
+ socket_family=socket.AF_UNIX,
+ socket_file=CONF.api.unix_socket,
+ socket_mode=CONF.api.unix_socket_mode,
+ use_ssl=use_ssl)
+ else:
+ self.server = wsgi.Server(CONF, name, self.app,
+ host=CONF.api.host_ip,
+ port=CONF.api.port,
+ use_ssl=use_ssl)
def start(self):
"""Start serving this service using loaded configuration.
@@ -64,6 +75,8 @@ class WSGIService(service.ServiceBase):
:returns: None
"""
self.server.stop()
+ if CONF.api.unix_socket:
+ il_utils.unlink_without_raise(CONF.api.unix_socket)
def wait(self):
"""Wait for the service to stop serving this API.
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
index 091ca64c6..cb801fc8c 100644
--- a/ironic/conductor/cleaning.py
+++ b/ironic/conductor/cleaning.py
@@ -68,8 +68,7 @@ def do_node_clean(task, clean_steps=None, disable_ramdisk=False):
if not disable_ramdisk:
task.driver.network.validate(task)
except exception.InvalidParameterValue as e:
- msg = (_('Validation failed. Cannot clean node %(node)s. '
- 'Error: %(msg)s') %
+ msg = (_('Validation of node %(node)s for cleaning failed: %(msg)s') %
{'node': node.uuid, 'msg': e})
return utils.cleaning_error_handler(task, msg)
@@ -115,7 +114,7 @@ def do_node_clean(task, clean_steps=None, disable_ramdisk=False):
task, disable_ramdisk=disable_ramdisk)
except (exception.InvalidParameterValue,
exception.NodeCleaningFailure) as e:
- msg = (_('Cannot clean node %(node)s. Error: %(msg)s')
+ msg = (_('Cannot clean node %(node)s: %(msg)s')
% {'node': node.uuid, 'msg': e})
return utils.cleaning_error_handler(task, msg)
diff --git a/ironic/conductor/deployments.py b/ironic/conductor/deployments.py
index 30f24e404..3b72dcb03 100644
--- a/ironic/conductor/deployments.py
+++ b/ironic/conductor/deployments.py
@@ -22,7 +22,6 @@ from oslo_utils import excutils
from ironic.common import exception
from ironic.common.glance_service import service_utils as glance_utils
from ironic.common.i18n import _
-from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import notification_utils as notify_utils
@@ -88,11 +87,8 @@ def start_deploy(task, manager, configdrive=None, event='deploy',
# Infer the image type to make sure the deploy driver
# validates only the necessary variables for different
# image types.
- # NOTE(sirushtim): The iwdi variable can be None. It's up to
- # the deploy driver to validate this.
- iwdi = images.is_whole_disk_image(task.context, node.instance_info)
- node.set_driver_internal_info('is_whole_disk_image', iwdi)
- node.save()
+ if utils.update_image_type(task.context, task.node):
+ node.save()
try:
task.driver.power.validate(task)
@@ -103,7 +99,7 @@ def start_deploy(task, manager, configdrive=None, event='deploy',
except exception.InvalidParameterValue as e:
raise exception.InstanceDeployFailure(
_("Failed to validate deploy or power info for node "
- "%(node_uuid)s. Error: %(msg)s") %
+ "%(node_uuid)s: %(msg)s") %
{'node_uuid': node.uuid, 'msg': e}, code=e.code)
try:
@@ -135,8 +131,7 @@ def do_node_deploy(task, conductor_id=None, configdrive=None,
task,
('Error while uploading the configdrive for %(node)s '
'to Swift') % {'node': node.uuid},
- _('Failed to upload the configdrive to Swift. '
- 'Error: %s') % e,
+ _('Failed to upload the configdrive to Swift: %s') % e,
clean_up=False)
except db_exception.DBDataError as e:
with excutils.save_and_reraise_exception():
@@ -197,7 +192,7 @@ def do_node_deploy(task, conductor_id=None, configdrive=None,
utils.deploying_error_handler(
task,
'Error while getting deploy steps; cannot deploy to node '
- '%(node)s. Error: %(err)s' % {'node': node.uuid, 'err': e},
+ '%(node)s: %(err)s' % {'node': node.uuid, 'err': e},
_("Cannot get deploy steps; failed to deploy: %s") % e)
if not node.driver_internal_info.get('deploy_steps'):
@@ -284,7 +279,7 @@ def do_next_deploy_step(task, step_index):
# Avoid double handling of failures. For example, set_failed_state
# from deploy_utils already calls deploying_error_handler.
if task.node.provision_state != states.DEPLOYFAIL:
- log_msg = ('Node %(node)s failed deploy step %(step)s. Error: '
+ log_msg = ('Node %(node)s failed deploy step %(step)s: '
'%(err)s' % {'node': node.uuid,
'step': node.deploy_step, 'err': e})
utils.deploying_error_handler(
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 983b504c7..d11224852 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -56,7 +56,6 @@ from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import faults
from ironic.common.i18n import _
-from ironic.common import images
from ironic.common import network
from ironic.common import nova
from ironic.common import states
@@ -699,7 +698,7 @@ class ConductorManager(base_manager.BaseConductorManager):
raise exception.InstanceRescueFailure(
instance=node.instance_uuid,
node=node.uuid,
- reason=_("Validation failed. Error: %s") % e)
+ reason=_("Validation failed: %s") % e)
try:
task.process_event(
'rescue',
@@ -793,7 +792,7 @@ class ConductorManager(base_manager.BaseConductorManager):
raise exception.InstanceUnrescueFailure(
instance=node.instance_uuid,
node=node.uuid,
- reason=_("Validation failed. Error: %s") % e)
+ reason=_("Validation failed: %s") % e)
try:
task.process_event(
@@ -854,7 +853,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.driver.rescue.clean_up(task)
except Exception as e:
LOG.exception('Failed to clean up rescue for node %(node)s '
- 'after aborting the operation. Error: %(err)s',
+ 'after aborting the operation: %(err)s',
{'node': node.uuid, 'err': e})
error_msg = _('Failed to clean up rescue after aborting '
'the operation')
@@ -1074,7 +1073,7 @@ class ConductorManager(base_manager.BaseConductorManager):
with excutils.save_and_reraise_exception():
LOG.exception('Error in tear_down of node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
- error = _("Failed to tear down. Error: %s") % e
+ error = _("Failed to tear down: %s") % e
utils.node_history_record(task.node, event=error,
event_type=states.UNPROVISION,
error=True,
@@ -1163,8 +1162,8 @@ class ConductorManager(base_manager.BaseConductorManager):
task.driver.power.validate(task)
task.driver.network.validate(task)
except exception.InvalidParameterValue as e:
- msg = (_('Validation failed. Cannot clean node %(node)s. '
- 'Error: %(msg)s') %
+ msg = (_('Validation of node %(node)s for cleaning '
+ 'failed: %(msg)s') %
{'node': node.uuid, 'msg': e})
raise exception.InvalidParameterValue(msg)
@@ -1358,8 +1357,7 @@ class ConductorManager(base_manager.BaseConductorManager):
with excutils.save_and_reraise_exception():
LOG.exception('Error in aborting the inspection of '
'node %(node)s', {'node': node.uuid})
- error = _('Failed to abort inspection. '
- 'Error: %s') % e
+ error = _('Failed to abort inspection: %s') % e
utils.node_history_record(task.node, event=error,
event_type=states.INTROSPECTION,
error=True,
@@ -1554,8 +1552,7 @@ class ConductorManager(base_manager.BaseConductorManager):
"while trying to get power state."))
except Exception as e:
LOG.debug("During power_failure_recovery, could "
- "not get power state for node %(node)s, "
- "Error: %(err)s.",
+ "not get power state for node %(node)s: %(err)s",
{'node': task.node.uuid, 'err': e})
else:
handle_recovery(task, power_state)
@@ -1711,10 +1708,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# being triggered, as such we need to populate the
# internal info based on the configuration the user has
# supplied.
- iwdi = images.is_whole_disk_image(task.context,
- task.node.instance_info)
- if iwdi is not None:
- node.set_driver_internal_info('is_whole_disk_image', iwdi)
+ utils.update_image_type(task.context, task.node)
if deploy_utils.get_boot_option(node) != 'local':
# Calling boot validate to ensure that sufficient information
# is supplied to allow the node to be able to boot if takeover
@@ -1894,9 +1888,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# the meantime, we don't know if the is_whole_disk_image value will
# change or not. It isn't saved to the DB, but only used with this
# node instance for the current validations.
- iwdi = images.is_whole_disk_image(context,
- task.node.instance_info)
- task.node.set_driver_internal_info('is_whole_disk_image', iwdi)
+ utils.update_image_type(context, task.node)
for iface_name in task.driver.non_vendor_interfaces:
iface = getattr(task.driver, iface_name)
result = reason = None
@@ -3659,7 +3651,8 @@ def do_sync_power_state(task, count):
# Also make sure to cache the current boot_mode and secure_boot states
utils.node_cache_boot_mode(task)
- if node.power_state and node.power_state == power_state:
+ if ((node.power_state and node.power_state == power_state)
+ or (node.power_state is None and power_state is None)):
# No action is needed
return 0
@@ -3669,7 +3662,8 @@ def do_sync_power_state(task, count):
node = task.node
# Repeat all checks with exclusive lock to avoid races
- if node.power_state and node.power_state == power_state:
+ if ((node.power_state and node.power_state == power_state)
+ or (node.power_state is None and power_state is None)):
# Node power state was updated to the correct value
return 0
elif node.provision_state in SYNC_EXCLUDED_STATES or node.maintenance:
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index ab091ab1c..4a0d68a5d 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -33,6 +33,7 @@ from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import faults
from ironic.common.i18n import _
+from ironic.common import images
from ironic.common import network
from ironic.common import nova
from ironic.common import states
@@ -248,8 +249,8 @@ def _can_skip_state_change(task, new_state):
except Exception as e:
with excutils.save_and_reraise_exception():
error = _(
- "Failed to change power state to '%(target)s'. "
- "Error: %(error)s") % {'target': new_state, 'error': e}
+ "Failed to change power state to '%(target)s': %(error)s") % {
+ 'target': new_state, 'error': e}
node_history_record(node, event=error, error=True)
node['target_power_state'] = states.NOSTATE
node.save()
@@ -331,7 +332,7 @@ def node_power_action(task, new_state, timeout=None):
node['target_power_state'] = states.NOSTATE
error = _(
"Failed to change power state to '%(target_state)s' "
- "by '%(new_state)s'. Error: %(error)s") % {
+ "by '%(new_state)s': %(error)s") % {
'target_state': target_state,
'new_state': new_state,
'error': e}
@@ -364,7 +365,7 @@ def node_power_action(task, new_state, timeout=None):
task.driver.storage.detach_volumes(task)
except exception.StorageError as e:
LOG.warning("Volume detachment for node %(node)s "
- "failed. Error: %(error)s",
+ "failed: %(error)s",
{'node': node.uuid, 'error': e})
@@ -1538,8 +1539,8 @@ def node_change_boot_mode(task, target_boot_mode):
'class': type(exc).__name__, 'exc': exc},
exc_info=not isinstance(exc, exception.IronicException))
task.node.last_error = (
- "Failed to change boot mode to '%(target)s'. "
- "Error: %(err)s" % {'target': target_boot_mode, 'err': exc})
+ "Failed to change boot mode to '%(target)s: %(err)s" % {
+ 'target': target_boot_mode, 'err': exc})
task.node.save()
else:
LOG.info("Changed boot_mode to %(mode)s for node %(node)s",
@@ -1586,8 +1587,8 @@ def node_change_secure_boot(task, secure_boot_target):
'class': type(exc).__name__, 'exc': exc},
exc_info=not isinstance(exc, exception.IronicException))
task.node.last_error = (
- "Failed to change secure_boot state to '%(target)s'. "
- "Error: %(err)s" % {'target': secure_boot_target, 'err': exc})
+ "Failed to change secure_boot state to '%(target)s': %(err)s" % {
+ 'target': secure_boot_target, 'err': exc})
task.node.save()
else:
LOG.info("Changed secure_boot state to %(state)s for node %(node)s",
@@ -1649,3 +1650,24 @@ def node_history_record(node, conductor=None, event=None,
severity=error and "ERROR" or "INFO",
event=event,
event_type=event_type or "UNKNOWN").create()
+
+
+def update_image_type(context, node):
+ """Updates is_whole_disk_image and image_type based on the node data.
+
+ :param context: Request context.
+ :param node: Node object.
+ :return: True if any changes have been done, else False.
+ """
+ iwdi = images.is_whole_disk_image(context, node.instance_info)
+ if iwdi is None:
+ return False
+
+ node.set_driver_internal_info('is_whole_disk_image', iwdi)
+ # We need to gradually phase out is_whole_disk_image in favour of
+ # image_type, so make sure to set it as well. The primary use case is to
+ # cache information detected from Glance or the presence of kernel/ramdisk.
+ node.set_instance_info(
+ 'image_type',
+ images.IMAGE_TYPE_WHOLE_DISK if iwdi else images.IMAGE_TYPE_PARTITION)
+ return True
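update_image_type() returns True only when detection was conclusive, so callers persist the node only in that case (as the deployments.py hunk above shows). The boolean-to-string mapping it caches is simple; a runnable sketch:

IMAGE_TYPE_PARTITION = 'partition'
IMAGE_TYPE_WHOLE_DISK = 'whole-disk'

def image_type_for(iwdi):
    # Mirrors the mapping done by update_image_type().
    if iwdi is None:
        return None  # detection inconclusive; nothing is saved
    return IMAGE_TYPE_WHOLE_DISK if iwdi else IMAGE_TYPE_PARTITION

print(image_type_for(True))   # whole-disk
print(image_type_for(False))  # partition
print(image_type_for(None))   # None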
diff --git a/ironic/conf/api.py b/ironic/conf/api.py
index dcf235edd..2b0e9a824 100644
--- a/ironic/conf/api.py
+++ b/ironic/conf/api.py
@@ -15,9 +15,20 @@
# under the License.
from oslo_config import cfg
+from oslo_config import types as cfg_types
from ironic.common.i18n import _
+
+class Octal(cfg_types.Integer):
+
+ def __call__(self, value):
+ if isinstance(value, int):
+ return value
+ else:
+ return int(str(value), 8)
+
+
opts = [
cfg.HostAddressOpt('host_ip',
default='0.0.0.0',
@@ -26,6 +37,11 @@ opts = [
cfg.PortOpt('port',
default=6385,
help=_('The TCP port on which ironic-api listens.')),
+ cfg.StrOpt('unix_socket',
+ help=_('Unix socket to listen on. Disables host_ip and port.')),
+ cfg.Opt('unix_socket_mode', type=Octal(),
+ help=_('File mode (an octal number) of the unix socket to '
+ 'listen on. Ignored if unix_socket is not set.')),
cfg.IntOpt('max_limit',
default=1000,
mutable=True,
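The Octal type interprets string values as base-8, so the familiar chmod-style notation works directly in the config file. A quick check of the behavior (values are examples):

def octal(value):
    # Same logic as the Octal config type above.
    if isinstance(value, int):
        return value
    return int(str(value), 8)

print(octal('0666'))  # 438 == 0o666
print(octal(438))     # already an int: passed through unchanged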
diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py
index 32f53644a..7a7fb37d7 100644
--- a/ironic/conf/deploy.py
+++ b/ironic/conf/deploy.py
@@ -128,7 +128,10 @@ opts = [
help=_('Default boot option to use when no boot option is '
'requested in node\'s driver_info. Defaults to '
'"local". Prior to the Ussuri release, the default '
- 'was "netboot".')),
+ 'was "netboot".'),
+ deprecated_for_removal=True,
+ deprecated_reason=_('Support for network boot will be removed '
+ 'after the Yoga release.')),
cfg.StrOpt('default_boot_mode',
choices=[(boot_modes.UEFI, _('UEFI boot mode')),
(boot_modes.LEGACY_BIOS, _('Legacy BIOS boot mode'))],
diff --git a/ironic/conf/glance.py b/ironic/conf/glance.py
index c83fa92e1..a3286b1eb 100644
--- a/ironic/conf/glance.py
+++ b/ironic/conf/glance.py
@@ -91,6 +91,11 @@ opts = [
'section). Swift temporary URL format: '
'"endpoint_url/api_version/account/container/object_id"')),
cfg.StrOpt(
+ 'swift_account_prefix',
+ default='AUTH',
+ help=_('The prefix added to the project UUID to determine the Swift '
+ 'account.')),
+ cfg.StrOpt(
'swift_container',
default='glance',
help=_('The Swift container Glance is configured to store its '
diff --git a/ironic/conf/redfish.py b/ironic/conf/redfish.py
index eddf3e013..3cc9fe015 100644
--- a/ironic/conf/redfish.py
+++ b/ironic/conf/redfish.py
@@ -90,6 +90,21 @@ opts = [
default=60,
help=_('Number of seconds to wait between checking for '
'failed firmware update tasks')),
+ cfg.StrOpt('firmware_source',
+ choices=[('http', _('If firmware source URL is also HTTP, then '
+ 'serve from original location, otherwise '
+ 'copy to ironic\'s HTTP server. Default.')),
+ ('local', _('Download from original location and '
+ 'serve from ironic\'s HTTP server.')),
+ ('swift', _('If firmware source URL is also Swift, '
+ 'serve from original location, otherwise '
+ 'copy to ironic\'s Swift server.'))],
+ default='http',
+ mutable=True,
+ help=_('Specifies how the firmware image should be served: '
+ 'directly from its original location using the firmware '
+ 'source URL, or from ironic\'s Swift or HTTP '
+ 'server.')),
cfg.IntOpt('raid_config_status_interval',
min=0,
default=60,
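A hypothetical snippet selecting the new option; 'local' forces the firmware image to be downloaded and re-served from ironic's HTTP server regardless of the source URL scheme:

[redfish]
firmware_source = local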
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index f309a1011..c171f81b1 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -471,6 +471,13 @@ class AgentDeploy(CustomAgentDeploy):
deploy_utils.check_for_missing_params(params, error_msg)
+ image_type = node.instance_info.get('image_type')
+ if image_type and image_type not in images.VALID_IMAGE_TYPES:
+ raise exception.InvalidParameterValue(
+ _('Invalid image_type "%(value)s", valid values are %(valid)s')
+ % {'value': image_type,
+ 'valid': ', '.join(images.VALID_IMAGE_TYPES)})
+
# NOTE(dtantsur): glance images contain a checksum; for file images we
# will recalculate the checksum anyway.
if (not service_utils.is_glance_image(image_source)
@@ -498,6 +505,11 @@ class AgentDeploy(CustomAgentDeploy):
validate_http_provisioning_configuration(node)
validate_image_proxies(node)
+ capabilities = utils.parse_instance_info_capabilities(node)
+ if 'boot_option' in capabilities:
+ LOG.warning("The boot_option capability has been deprecated, "
+ "please unset it for node %s", node.uuid)
+
@METRICS.timer('AgentDeployMixin.write_image')
@base.deploy_step(priority=80)
@task_manager.require_exclusive_lock
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index 8f480aca7..582c36d90 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -502,7 +502,7 @@ class HeartbeatMixin(object):
msg = _('Failed to process the next deploy step')
self.process_next_step(task, 'deploy')
except Exception as e:
- last_error = _('%(msg)s. Error: %(exc)s') % {'msg': msg, 'exc': e}
+ last_error = _('%(msg)s: %(exc)s') % {'msg': msg, 'exc': e}
LOG.exception('Asynchronous exception for node %(node)s: %(err)s',
{'node': task.node.uuid, 'err': last_error})
# Do not call the error handler if the node is already DEPLOYFAIL
@@ -537,7 +537,7 @@ class HeartbeatMixin(object):
if not polling:
self.continue_cleaning(task)
except Exception as e:
- last_error = _('%(msg)s. Error: %(exc)s') % {'msg': msg, 'exc': e}
+ last_error = _('%(msg)s: %(exc)s') % {'msg': msg, 'exc': e}
log_msg = ('Asynchronous exception for node %(node)s: %(err)s' %
{'node': task.node.uuid, 'err': last_error})
if node.provision_state in (states.CLEANING, states.CLEANWAIT):
@@ -549,7 +549,7 @@ class HeartbeatMixin(object):
try:
self._finalize_rescue(task)
except Exception as e:
- last_error = _('%(msg)s. Error: %(exc)s') % {'msg': msg, 'exc': e}
+ last_error = _('%(msg)s: %(exc)s') % {'msg': msg, 'exc': e}
LOG.exception('Asynchronous exception for node %(node)s: %(err)s',
{'node': task.node.uuid, 'err': last_error})
if task.node.provision_state in (states.RESCUING,
@@ -1194,7 +1194,7 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'command "sync"')
LOG.warning(
'Failed to flush the file system prior to hard '
- 'rebooting the node %(node)s. Error: %(error)s',
+ 'rebooting the node %(node)s: %(error)s',
{'node': node.uuid, 'error': error})
manager_utils.node_power_action(task, states.POWER_OFF)
@@ -1327,7 +1327,7 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
)
if result['command_status'] == 'FAILED':
msg = (_("Failed to install a bootloader when "
- "deploying node %(node)s. Error: %(error)s") %
+ "deploying node %(node)s: %(error)s") %
{'node': node.uuid,
'error': agent_client.get_command_error(result)})
log_and_raise_deployment_error(task, msg)
@@ -1341,7 +1341,7 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
persistent=persistent)
except Exception as e:
msg = (_("Failed to change the boot device to %(boot_dev)s "
- "when deploying node %(node)s. Error: %(error)s") %
+ "when deploying node %(node)s: %(error)s") %
{'boot_dev': boot_devices.DISK, 'node': node.uuid,
'error': e})
log_and_raise_deployment_error(task, msg, exc=e)
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml
index 9baa882a6..ed8d168c3 100644
--- a/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml
+++ b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml
@@ -1,4 +1,4 @@
- import_tasks: mounts.yaml
- when: ironic.image.type | default('whole-disk-image') == 'partition'
+ when: ironic.image.type | default('whole-disk') == 'partition'
- import_tasks: grub.yaml
- when: ironic.image.type | default('whole-disk-image') == 'partition'
+ when: ironic.image.type | default('whole-disk') == 'partition'
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml
index e92aba69d..1c13bc523 100644
--- a/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml
+++ b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml
@@ -1,2 +1,2 @@
- import_tasks: parted.yaml
- when: ironic.image.type | default('whole-disk-image') == 'partition'
+ when: ironic.image.type | default('whole-disk') == 'partition'
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index f46250e2a..f6883249f 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -570,24 +570,27 @@ def validate_image_properties(task, deploy_info):
if not image_href:
image_href = boot_iso
- properties = []
- if boot_iso or task.node.driver_internal_info.get('is_whole_disk_image'):
- # No image properties are required in this case
+ boot_option = get_boot_option(task.node)
+
+ if (boot_iso
+ or task.node.driver_internal_info.get('is_whole_disk_image')
+ or boot_option == 'local'):
+ # No image properties are required in this case, but validate that the
+ # image at least looks reasonable.
+ try:
+ image_service.get_image_service(image_href, context=task.context)
+ except exception.ImageRefValidationFailed as e:
+ raise exception.InvalidParameterValue(err=e)
return
if service_utils.is_glance_image(image_href):
properties = ['kernel_id', 'ramdisk_id']
- boot_option = get_boot_option(task.node)
if boot_option == 'kickstart':
properties.append('stage2_id')
- else:
- properties = ['kernel', 'ramdisk']
-
- if image_href:
image_props = get_image_properties(task.context, image_href)
else:
- # Ramdisk deploy, no image_source is present
- image_props = []
+ properties = ['kernel', 'ramdisk']
+ image_props = {}
missing_props = []
for prop in properties:
@@ -601,12 +604,6 @@ def validate_image_properties(task, deploy_info):
"%(properties)s") % {'image': image_href, 'properties': props})
-def get_default_boot_option():
- """Gets the default boot option."""
- # TODO(TheJulia): Deprecated: Remove after Ussuri.
- return CONF.deploy.default_boot_option
-
-
def get_boot_option(node):
"""Gets the boot option.
@@ -803,7 +800,8 @@ def get_image_instance_info(node):
"specified at the same time."))
info['boot_iso'] = boot_iso
else:
- if get_boot_option(node) == 'ramdisk':
+ boot_option = get_boot_option(node)
+ if boot_option == 'ramdisk':
# Ramdisk deploy does not require an image
info['kernel'] = node.instance_info.get('kernel')
info['ramdisk'] = node.instance_info.get('ramdisk')
@@ -813,6 +811,7 @@ def get_image_instance_info(node):
is_whole_disk_image = node.driver_internal_info.get(
'is_whole_disk_image')
if (not is_whole_disk_image
+ and boot_option != 'local'
and not service_utils.is_glance_image(image_source)):
info['kernel'] = node.instance_info.get('kernel')
info['ramdisk'] = node.instance_info.get('ramdisk')
@@ -827,8 +826,7 @@ def get_image_instance_info(node):
return info
-_ERR_MSG_INVALID_DEPLOY = _("Cannot validate parameter for driver deploy. "
- "Invalid parameter %(param)s. Reason: %(reason)s")
+_ERR_MSG_INVALID_DEPLOY = _("Invalid parameter %(param)s: %(reason)s")
def parse_instance_info(node):
@@ -850,16 +848,17 @@ def parse_instance_info(node):
i_info = {}
i_info['image_source'] = info.get('image_source')
iwdi = node.driver_internal_info.get('is_whole_disk_image')
+ boot_option = get_boot_option(node)
if not iwdi:
if (i_info['image_source']
+ and boot_option != 'local'
and not service_utils.is_glance_image(
i_info['image_source'])):
i_info['kernel'] = info.get('kernel')
i_info['ramdisk'] = info.get('ramdisk')
i_info['root_gb'] = info.get('root_gb')
- error_msg = _("Cannot validate driver deploy. Some parameters were missing"
- " in node's instance_info")
+ error_msg = _("Some parameters were missing in node's instance_info")
check_for_missing_params(i_info, error_msg)
# This is used in many places, so keep it even for whole-disk images.
@@ -1071,7 +1070,7 @@ def _validate_image_url(node, url, secret=False):
except exception.ImageRefValidationFailed as e:
with excutils.save_and_reraise_exception():
LOG.error("The specified URL is not a valid HTTP(S) URL or is "
- "not reachable for node %(node)s. Error: %(msg)s",
+ "not reachable for node %(node)s: %(msg)s",
{'node': node.uuid, 'msg': e})
@@ -1180,6 +1179,7 @@ def build_instance_info_for_deploy(task):
iwdi = node.driver_internal_info.get('is_whole_disk_image')
image_source = instance_info['image_source']
image_download_source = get_image_download_source(node)
+ boot_option = get_boot_option(task.node)
if service_utils.is_glance_image(image_source):
glance = image_service.GlanceImageService(context=task.context)
@@ -1202,7 +1202,7 @@ def build_instance_info_for_deploy(task):
instance_info['image_tags'] = image_info.get('tags', [])
instance_info['image_properties'] = image_info['properties']
- if not iwdi:
+ if not iwdi and boot_option != 'local':
instance_info['kernel'] = image_info['properties']['kernel_id']
instance_info['ramdisk'] = image_info['properties']['ramdisk_id']
elif (image_source.startswith('file://')
@@ -1213,11 +1213,11 @@ def build_instance_info_for_deploy(task):
instance_info['image_url'] = image_source
if not iwdi:
- instance_info['image_type'] = 'partition'
+ instance_info['image_type'] = images.IMAGE_TYPE_PARTITION
i_info = parse_instance_info(node)
instance_info.update(i_info)
else:
- instance_info['image_type'] = 'whole-disk-image'
+ instance_info['image_type'] = images.IMAGE_TYPE_WHOLE_DISK
return instance_info
@@ -1422,7 +1422,10 @@ def reboot_to_finish_step(task):
:returns: states.CLEANWAIT if cleaning operation in progress
or states.DEPLOYWAIT if deploy operation in progress.
"""
- prepare_agent_boot(task)
+ disable_ramdisk = task.node.driver_internal_info.get(
+ 'cleaning_disable_ramdisk')
+ if not disable_ramdisk:
+ prepare_agent_boot(task)
manager_utils.node_power_action(task, states.REBOOT)
return get_async_step_return_state(task.node)
@@ -1447,5 +1450,5 @@ def get_root_device_for_deploy(node):
except ValueError as e:
raise exception.InvalidParameterValue(
_('Failed to validate the root device hints %(hints)s (from the '
- 'node\'s %(source)s) for node %(node)s. Error: %(error)s') %
+ 'node\'s %(source)s) for node %(node)s: %(error)s') %
{'node': node.uuid, 'hints': hints, 'source': source, 'error': e})
diff --git a/ironic/drivers/modules/drac/bios.py b/ironic/drivers/modules/drac/bios.py
index 8ea3ff51f..2ee565768 100644
--- a/ironic/drivers/modules/drac/bios.py
+++ b/ironic/drivers/modules/drac/bios.py
@@ -71,7 +71,7 @@ class DracWSManBIOS(base.BIOSInterface):
reason=_("Unable to import dracclient.exceptions library"))
@METRICS.timer('DracWSManBIOS.apply_configuration')
- @base.clean_step(priority=0, argsinfo=_args_info)
+ @base.clean_step(priority=0, argsinfo=_args_info, requires_ramdisk=False)
@base.deploy_step(priority=0, argsinfo=_args_info)
def apply_configuration(self, task, settings):
"""Apply the BIOS configuration to the node
@@ -352,7 +352,7 @@ class DracWSManBIOS(base.BIOSInterface):
manager_utils.notify_conductor_resume_deploy(task)
@METRICS.timer('DracWSManBIOS.factory_reset')
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
@base.deploy_step(priority=0)
def factory_reset(self, task):
"""Reset the BIOS settings of the node to the factory default.
@@ -418,7 +418,7 @@ class DracWSManBIOS(base.BIOSInterface):
node.timestamp_driver_internal_info('factory_reset_time')
# rebooting the server to apply factory reset value
- client.set_power_state('REBOOT')
+ task.driver.power.reboot(task)
# This method calls node.save(), bios_config_job_id will be
# saved automatically
diff --git a/ironic/drivers/modules/drac/management.py b/ironic/drivers/modules/drac/management.py
index a4278731f..df6942611 100644
--- a/ironic/drivers/modules/drac/management.py
+++ b/ironic/drivers/modules/drac/management.py
@@ -362,7 +362,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
**IMPORT_CONFIGURATION_ARGSINFO}
@base.deploy_step(priority=0, argsinfo=EXPORT_CONFIGURATION_ARGSINFO)
- @base.clean_step(priority=0, argsinfo=EXPORT_CONFIGURATION_ARGSINFO)
+ @base.clean_step(priority=0, argsinfo=EXPORT_CONFIGURATION_ARGSINFO,
+ requires_ramdisk=False)
def export_configuration(self, task, export_configuration_location):
"""Export the configuration of the server.
@@ -411,7 +412,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
{'node': task.node.uuid}))
@base.deploy_step(priority=0, argsinfo=IMPORT_CONFIGURATION_ARGSINFO)
- @base.clean_step(priority=0, argsinfo=IMPORT_CONFIGURATION_ARGSINFO)
+ @base.clean_step(priority=0, argsinfo=IMPORT_CONFIGURATION_ARGSINFO,
+ requires_ramdisk=False)
def import_configuration(self, task, import_configuration_location):
"""Import and apply the configuration to the server.
@@ -457,7 +459,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
return deploy_utils.reboot_to_finish_step(task)
@base.clean_step(priority=0,
- argsinfo=IMPORT_EXPORT_CONFIGURATION_ARGSINFO)
+ argsinfo=IMPORT_EXPORT_CONFIGURATION_ARGSINFO,
+ requires_ramdisk=False)
@base.deploy_step(priority=0,
argsinfo=IMPORT_EXPORT_CONFIGURATION_ARGSINFO)
def import_export_configuration(self, task, import_configuration_location,
@@ -612,7 +615,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
@METRICS.timer('DracRedfishManagement.clear_job_queue')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def clear_job_queue(self, task):
"""Clear iDRAC job queue.
@@ -626,7 +629,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
@METRICS.timer('DracRedfishManagement.reset_idrac')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def reset_idrac(self, task):
"""Reset the iDRAC.
@@ -642,7 +645,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
@METRICS.timer('DracRedfishManagement.known_good_state')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def known_good_state(self, task):
"""Reset iDRAC to known good state.
@@ -764,7 +767,7 @@ class DracWSManManagement(base.ManagementInterface):
@METRICS.timer('DracManagement.reset_idrac')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def reset_idrac(self, task):
"""Reset the iDRAC.
@@ -779,7 +782,7 @@ class DracWSManManagement(base.ManagementInterface):
@METRICS.timer('DracManagement.known_good_state')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def known_good_state(self, task):
"""Reset the iDRAC, Clear the job queue.
@@ -795,7 +798,7 @@ class DracWSManManagement(base.ManagementInterface):
@METRICS.timer('DracManagement.clear_job_queue')
@base.verify_step(priority=0)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
def clear_job_queue(self, task):
"""Clear the job queue.
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index d4e633f9c..ae06f0dfa 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -27,6 +27,7 @@ import tenacity
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import raid as raid_common
+from ironic.common import states
from ironic.conductor import periodics
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
@@ -1171,6 +1172,13 @@ def _wait_till_realtime_ready(task):
:raises RedfishError: If can't find OEM extension or it fails to
execute
"""
+ # If running without IPA, ensure the system is powered on, turning it on
+ disable_ramdisk = task.node.driver_internal_info.get(
+ 'cleaning_disable_ramdisk')
+ power_state = task.driver.power.get_power_state(task)
+ if disable_ramdisk and power_state == states.POWER_OFF:
+ task.driver.power.set_power_state(task, states.POWER_ON)
+
try:
_retry_till_realtime_ready(task)
except tenacity.RetryError:
@@ -1238,7 +1246,7 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
),
'required': False,
}
- })
+ }, requires_ramdisk=False)
def create_configuration(self, task, create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=False):
@@ -1267,7 +1275,7 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
task, create_root_volume, create_nonroot_volumes,
delete_existing)
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
@base.deploy_step(priority=0)
def delete_configuration(self, task):
"""Delete RAID configuration on the node.
@@ -1593,7 +1601,7 @@ class DracWSManRAID(base.RAIDInterface):
),
"required": False,
}
- })
+ }, requires_ramdisk=False)
def create_configuration(self, task,
create_root_volume=True,
create_nonroot_volumes=True,
@@ -1690,7 +1698,7 @@ class DracWSManRAID(base.RAIDInterface):
return _create_virtual_disks(task, node)
@METRICS.timer('DracRAID.delete_configuration')
- @base.clean_step(priority=0)
+ @base.clean_step(priority=0, requires_ramdisk=False)
@base.deploy_step(priority=0)
def delete_configuration(self, task):
"""Delete the RAID configuration.
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index 166499d29..5087e3998 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -374,14 +374,7 @@ class IloVirtualMediaBoot(base.BootInterface):
"""
node = task.node
- # NOTE(TheJulia): If this method is being called by something
- # aside from deployment, clean and rescue, such as conductor takeover,
- # we should treat this as a no-op and move on otherwise we would
- # modify the state of the node due to virtual media operations.
- if node.provision_state not in (states.DEPLOYING,
- states.CLEANING,
- states.RESCUING,
- states.INSPECTING):
+ if not driver_utils.need_prepare_ramdisk(node):
return
prepare_node_for_deploy(task)
@@ -962,14 +955,7 @@ class IloUefiHttpsBoot(base.BootInterface):
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
- # NOTE(TheJulia): If this method is being called by something
- # aside from deployment, clean and rescue, such as conductor takeover,
- # we should treat this as a no-op and move on otherwise we would
- # modify the state of the node due to virtual media operations.
- if node.provision_state not in (states.DEPLOYING,
- states.CLEANING,
- states.RESCUING,
- states.INSPECTING):
+ if not driver_utils.need_prepare_ramdisk(node):
return
prepare_node_for_deploy(task)
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py
index ba29e0f14..833934793 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector.py
@@ -20,6 +20,7 @@ import shlex
from urllib import parse as urlparse
import eventlet
+from keystoneauth1 import exceptions as ks_exception
import openstack
from oslo_log import log as logging
@@ -66,9 +67,14 @@ def _get_client(context):
conf['ironic-inspector'] = conf.pop('inspector')
# TODO(pas-ha) investigate possibility of passing user context here,
# similar to what neutron/glance-related code does
- return openstack.connection.Connection(
- session=session,
- oslo_conf=conf).baremetal_introspection
+ try:
+ return openstack.connection.Connection(
+ session=session,
+ oslo_conf=conf).baremetal_introspection
+ except ks_exception.DiscoveryFailure as exc:
+ raise exception.ConfigInvalid(
+ _("Could not contact ironic-inspector for version discovery: %s")
+ % exc)
def _get_callback_endpoint(client):
diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py
index 7438137f7..1195a0670 100644
--- a/ironic/drivers/modules/irmc/boot.py
+++ b/ironic/drivers/modules/irmc/boot.py
@@ -971,13 +971,7 @@ class IRMCVirtualMediaBoot(base.BootInterface, IRMCVolumeBootMixIn):
:raises: IRMCOperationError, if some operation on iRMC fails.
"""
- # NOTE(TheJulia): If this method is being called by something
- # aside from deployment, clean and rescue, such as conductor takeover,
- # we should treat this as a no-op and move on otherwise we would
- # modify the state of the node due to virtual media operations.
- if task.node.provision_state not in (states.DEPLOYING,
- states.CLEANING,
- states.RESCUING):
+ if not driver_utils.need_prepare_ramdisk(task.node):
return
# NOTE(tiendc): Before deploying, we need to backup BIOS config
diff --git a/ironic/drivers/modules/ks.cfg.template b/ironic/drivers/modules/ks.cfg.template
index 40552377b..941d3c37d 100644
--- a/ironic/drivers/modules/ks.cfg.template
+++ b/ironic/drivers/modules/ks.cfg.template
@@ -18,7 +18,7 @@ autopart
# Downloading and installing OS image using liveimg section is mandatory
liveimg --url {{ ks_options.liveimg_url }}
-# Following %pre, %onerror and %trackback sections are mandatory
+# Following %pre and %onerror sections are mandatory
%pre
/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "start", "agent_status_message": "Deployment starting. Running pre-installation scripts."}' {{ ks_options.heartbeat_url }}
%end
@@ -27,11 +27,16 @@ liveimg --url {{ ks_options.liveimg_url }}
/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Deploying using anaconda. Check console for more information."}' {{ ks_options.heartbeat_url }}
%end
-%traceback
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Installer crashed unexpectedly."}' {{ ks_options.heartbeat_url }}
-%end
+# Config-drive information, if any.
+{{ ks_options.config_drive }}
-# Sending callback after the installation is mandatory
+# Sending callback after the installation is mandatory.
+# This ought to be the last thing done; otherwise the
+# ironic-conductor could reboot the node before anaconda
+# finishes executing everything in this file.
+# The sync makes sure that the data is flushed out to disk
+# before rebooting.
%post
+sync
/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "end", "agent_status_message": "Deployment completed successfully."}' {{ ks_options.heartbeat_url }}
%end
diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py
index 50d962fcf..fe93acefd 100644
--- a/ironic/drivers/modules/pxe.py
+++ b/ironic/drivers/modules/pxe.py
@@ -85,6 +85,9 @@ class PXEAnacondaDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
# NOTE(TheJulia): If this was any other interface, we would
# unconfigure tenant networks, add provisioning networks, etc.
task.driver.storage.attach_volumes(task)
+ node.instance_info = deploy_utils.build_instance_info_for_deploy(
+ task)
+ node.save()
if node.provision_state in (states.ACTIVE, states.UNRESCUING):
# In the event of takeover or unrescue.
task.driver.boot.prepare_instance(task)
@@ -123,16 +126,16 @@ class PXEAnacondaDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
agent_base.log_and_raise_deployment_error(task, msg)
try:
+ task.process_event('resume')
self.clean_up(task)
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.node_power_action(task, states.POWER_ON)
- node.provision_state = states.ACTIVE
- node.save()
+ task.process_event('done')
except Exception as e:
- msg = (_('Error rebooting node %(node)s after deploy. '
- 'Error: %(error)s') %
+ msg = (_('An error occurred after deployment, while preparing to '
+ 'reboot the node %(node)s: %(error)s') %
{'node': node.uuid, 'error': e})
agent_base.log_and_raise_deployment_error(task, msg)
@@ -161,3 +164,14 @@ class PXEAnacondaDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
'%(agent_status_message)s', msg)
deploy_utils.set_failed_state(task, agent_status_message,
collect_logs=False)
+
+ @METRICS.timer('AnacondaDeploy.clean_up')
+ @task_manager.require_exclusive_lock
+ def clean_up(self, task):
+ super(PXEAnacondaDeploy, self).clean_up(task)
+ node = task.node
+ # NOTE(rloo): These were added during deployment, as a side-effect of
+ # pxe_utils.get_instance_image_info().
+ node.del_driver_internal_info('stage2')
+ node.del_driver_internal_info('ks_template')
+ node.save()
diff --git a/ironic/drivers/modules/redfish/boot.py b/ironic/drivers/modules/redfish/boot.py
index 47b85cc25..55c826fc6 100644
--- a/ironic/drivers/modules/redfish/boot.py
+++ b/ironic/drivers/modules/redfish/boot.py
@@ -468,14 +468,7 @@ class RedfishVirtualMediaBoot(base.BootInterface):
operation failed on the node.
"""
node = task.node
- # NOTE(TheJulia): If this method is being called by something
- # aside from deployment, clean and rescue, such as conductor takeover,
- # we should treat this as a no-op and move on otherwise we would
- # modify the state of the node due to virtual media operations.
- if node.provision_state not in (states.DEPLOYING,
- states.CLEANING,
- states.RESCUING,
- states.INSPECTING):
+ if not driver_utils.need_prepare_ramdisk(node):
return
d_info = _parse_driver_info(node)
diff --git a/ironic/drivers/modules/redfish/firmware_utils.py b/ironic/drivers/modules/redfish/firmware_utils.py
index 35e4bb1f2..c73cb80dd 100644
--- a/ironic/drivers/modules/redfish/firmware_utils.py
+++ b/ironic/drivers/modules/redfish/firmware_utils.py
@@ -11,11 +11,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import shutil
+import tempfile
+from urllib import parse as urlparse
+
import jsonschema
from oslo_log import log
+from oslo_utils import fileutils
from ironic.common import exception
from ironic.common.i18n import _
+from ironic.common import image_service
+from ironic.common import swift
+from ironic.conf import CONF
LOG = log.getLogger(__name__)
@@ -26,22 +35,35 @@ _UPDATE_FIRMWARE_SCHEMA = {
# list of firmware update images
"items": {
"type": "object",
- "required": ["url"],
+ "required": ["url", "checksum"],
"properties": {
"url": {
"description": "URL for firmware file",
"type": "string",
"minLength": 1
},
+ "checksum": {
+ "description": "SHA1 checksum for firmware file",
+ "type": "string",
+ "minLength": 1
+ },
"wait": {
"description": "optional wait time for firmware update",
"type": "integer",
"minimum": 1
+ },
+ "source":
+ {
+ "description": "optional firmware_source to override global "
+ "setting for firmware file",
+ "type": "string",
+ "enum": ["http", "local", "swift"]
}
},
"additionalProperties": False
}
}
+_FIRMWARE_SUBDIR = 'firmware'
def validate_update_firmware_args(firmware_images):
@@ -56,3 +78,180 @@ def validate_update_firmware_args(firmware_images):
raise exception.InvalidParameterValue(
_('Invalid firmware update %(firmware_images)s. Errors: %(err)s')
% {'firmware_images': firmware_images, 'err': err})
+
+
+def get_swift_temp_url(parsed_url):
+ """Gets Swift temporary URL
+
+ :param parsed_url: Parsed URL from URL in format
+ swift://container/[sub-folder/]file
+ :returns: Swift temporary URL
+ """
+ return swift.SwiftAPI().get_temp_url(
+ parsed_url.netloc, parsed_url.path.lstrip('/'),
+ CONF.redfish.swift_object_expiry_timeout)
+
+
+def download_to_temp(node, url):
+ """Downloads to temporary location from given URL
+
+ :param node: Node for which the file is downloaded
+ :param url: URL to download from
+ :returns: File path of the temporary location the file was downloaded to
+ """
+ parsed_url = urlparse.urlparse(url)
+ scheme = parsed_url.scheme.lower()
+ if scheme not in ('http', 'swift', 'file'):
+ raise exception.InvalidParameterValue(
+ _('%(scheme)s is not supported for %(url)s.')
+ % {'scheme': scheme, 'url': parsed_url.geturl()})
+
+ tempdir = os.path.join(tempfile.gettempdir(), node.uuid)
+ os.makedirs(tempdir, exist_ok=True)
+ temp_file = os.path.join(
+ tempdir,
+ os.path.basename(parsed_url.path))
+ LOG.debug('For node %(node)s firmware at %(url)s will be downloaded to '
+ 'temporary location at %(temp_file)s',
+ {'node': node.uuid, 'url': url, 'temp_file': temp_file})
+ if scheme == 'http':
+ with open(temp_file, 'wb') as tf:
+ image_service.HttpImageService().download(url, tf)
+ elif scheme == 'swift':
+ swift_url = get_swift_temp_url(parsed_url)
+ with open(temp_file, 'wb') as tf:
+ image_service.HttpImageService().download(swift_url, tf)
+ elif scheme == 'file':
+ with open(temp_file, 'wb') as tf:
+ image_service.FileImageService().download(
+ parsed_url.path, tf)
+
+ return temp_file
+
+
+def verify_checksum(node, checksum, file_path):
+ """Verify checksum.
+
+ :param node: Node whose file checksum is being verified
+ :param checksum: Expected checksum value
+ :param file_path: File path for which to verify checksum
+ :raises RedfishError: When checksum does not match
+ """
+ calculated_checksum = fileutils.compute_file_checksum(
+ file_path, algorithm='sha1')
+ if checksum != calculated_checksum:
+ raise exception.RedfishError(
+ _('For node %(node)s firmware file %(temp_file)s checksums do not '
+ 'match. Expected: %(checksum)s, calculated: '
+ '%(calculated_checksum)s.')
+ % {'node': node.uuid, 'temp_file': file_path, 'checksum': checksum,
+ 'calculated_checksum': calculated_checksum})
+
+
+def stage(node, source, temp_file):
+ """Stage temporary file to configured location
+
+ :param node: Node for which to stage the file
+ :param source: Where to stage the file. Corresponds to
+ CONF.redfish.firmware_source.
+ :param temp_file: File path of temporary file to stage
+ :returns: Tuple of the staged URL and the source (http or swift) whose
+ staged files need cleanup afterwards.
+ :raises RedfishError: If staging to HTTP server has failed.
+ """
+ staged_url = None
+ filename = os.path.basename(temp_file)
+ if source in ('http', 'local'):
+ http_url = CONF.deploy.external_http_url or CONF.deploy.http_url
+ staged_url = urlparse.urljoin(
+ http_url, "/".join([_FIRMWARE_SUBDIR, node.uuid, filename]))
+ staged_folder = os.path.join(
+ CONF.deploy.http_root, _FIRMWARE_SUBDIR, node.uuid)
+ staged_path = os.path.join(staged_folder, filename)
+ LOG.debug('For node %(node)s temporary file %(temp_file)s will be '
+ 'hard-linked or copied to %(staged_path)s and served over '
+ '%(staged_url)s',
+ {'node': node.uuid, 'temp_file': temp_file,
+ 'staged_path': staged_path, 'staged_url': staged_url})
+ os.makedirs(staged_folder, exist_ok=True)
+ try:
+ os.link(temp_file, staged_path)
+ os.chmod(temp_file, CONF.redfish.file_permission)
+ except OSError as oserror:
+ LOG.debug("Could not hardlink file %(temp_file)s to location "
+ "%(staged_path)s. Will try to copy it. Error: %(error)s",
+ {'temp_file': temp_file, 'staged_path': staged_path,
+ 'error': oserror})
+ try:
+ shutil.copyfile(temp_file, staged_path)
+ os.chmod(staged_path, CONF.redfish.file_permission)
+ except IOError as ioerror:
+ raise exception.RedfishError(
+ _('For %(node)s failed to copy firmware file '
+ '%(temp_file)s to HTTP server root. Error %(error)s')
+ % {'node': node.uuid, 'temp_file': temp_file,
+ 'error': ioerror})
+
+ elif source == 'swift':
+ container = CONF.redfish.swift_container
+ timeout = CONF.redfish.swift_object_expiry_timeout
+ swift_api = swift.SwiftAPI()
+ object_name = "/".join([node.uuid, filename])
+ swift_api.create_object(
+ container,
+ object_name,
+ temp_file,
+ object_headers={'X-Delete-After': str(timeout)})
+ staged_url = swift_api.get_temp_url(
+ container, object_name, timeout)
+ LOG.debug('For node %(node)s temporary file at %(temp_file)s will be '
+ 'served from Swift temporary URL %(staged_url)s',
+ {'node': node.uuid, 'temp_file': temp_file,
+ 'staged_url': staged_url})
+
+ need_cleanup = 'swift' if source == 'swift' else 'http'
+ return staged_url, need_cleanup
+
+
+def cleanup(node):
+ """Clean up staged files
+
+ :param node: Node for which to clean up. Should contain
+ 'firmware_cleanup' entry in `driver_internal_info` to indicate
+ source(s) to be cleaned up.
+ """
+ # Clean up the temporary directory in case anything is left over after
+ # staging to HTTP or Swift has failed.
+ temp_dir = os.path.join(tempfile.gettempdir(), node.uuid)
+ LOG.debug('For node %(node)s cleaning up temporary files, if any, from '
+ '%(temp_dir)s.', {'node': node.uuid, 'temp_dir': temp_dir})
+ shutil.rmtree(temp_dir, ignore_errors=True)
+
+ cleanup = node.driver_internal_info.get('firmware_cleanup')
+ if not cleanup:
+ return
+
+ if 'http' in cleanup:
+ http_dir = os.path.join(
+ CONF.deploy.http_root, _FIRMWARE_SUBDIR, node.uuid)
+ LOG.debug('For node %(node)s cleaning up files from %(http_dir)s.',
+ {'node': node.uuid, 'http_dir': http_dir})
+ shutil.rmtree(http_dir, ignore_errors=True)
+
+ if 'swift' in cleanup:
+ swift_api = swift.SwiftAPI()
+ container = CONF.redfish.swift_container
+ LOG.debug('For node %(node)s cleaning up files from Swift container '
+ '%(container)s.',
+ {'node': node.uuid, 'container': container})
+ _, objects = swift_api.connection.get_container(container)
+ for o in objects:
+ name = o.get('name')
+ if name and name.startswith(node.uuid):
+ try:
+ swift_api.delete_object(container, name)
+ except exception.SwiftOperationError as error:
+ LOG.warning('For node %(node)s failed to clean up '
+ '%(object)s. Error: %(error)s',
+ {'node': node.uuid, 'object': name,
+ 'error': error})
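For reference, a firmware image entry that validates against the extended schema above could look like the following. Values are illustrative; `checksum` is now required, and `source` optionally overrides the global CONF.redfish.firmware_source setting:

    # Illustrative `update_firmware` argument accepted by
    # validate_update_firmware_args():
    firmware_images = [
        {
            "url": "http://example.com/firmware/bmc-1.2.3.bin",
            "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709",  # SHA1
            "wait": 300,        # optional pause after this update
            "source": "swift",  # optional override: http, local or swift
        },
    ]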
diff --git a/ironic/drivers/modules/redfish/management.py b/ironic/drivers/modules/redfish/management.py
index cb56a821b..a669d09bc 100644
--- a/ironic/drivers/modules/redfish/management.py
+++ b/ironic/drivers/modules/redfish/management.py
@@ -14,6 +14,7 @@
# under the License.
import collections
+from urllib.parse import urlparse
from ironic_lib import metrics_utils
from oslo_log import log
@@ -799,7 +800,8 @@ class RedfishManagement(base.ManagementInterface):
"""
firmware_update = firmware_updates[0]
- firmware_url = firmware_update['url']
+ firmware_url, need_cleanup = self._stage_firmware_file(
+ node, firmware_update)
LOG.debug('Applying firmware %(firmware_image)s to node '
'%(node_uuid)s',
@@ -809,8 +811,15 @@ class RedfishManagement(base.ManagementInterface):
task_monitor = update_service.simple_update(firmware_url)
firmware_update['task_monitor'] = task_monitor.task_monitor_uri
- node.set_driver_internal_info('firmware_updates',
- firmware_updates)
+ node.set_driver_internal_info('firmware_updates', firmware_updates)
+
+ if need_cleanup:
+ fw_cleanup = node.driver_internal_info.get('firmware_cleanup')
+ if not fw_cleanup:
+ fw_cleanup = [need_cleanup]
+ elif need_cleanup not in fw_cleanup:
+ fw_cleanup.append(need_cleanup)
+ node.set_driver_internal_info('firmware_cleanup', fw_cleanup)
def _continue_firmware_updates(self, task, update_service,
firmware_updates):
@@ -860,13 +869,18 @@ class RedfishManagement(base.ManagementInterface):
manager_utils.node_power_action(task, states.REBOOT)
def _clear_firmware_updates(self, node):
- """Clears firmware updates from driver_internal_info
+ """Clears firmware updates artifacts
+
+ Clears firmware updates from driver_internal_info and any files
+ that were staged.
Note that the caller must have an exclusive lock on the node.
:param node: the node to clear the firmware updates from
"""
+ firmware_utils.cleanup(node)
node.del_driver_internal_info('firmware_updates')
+ node.del_driver_internal_info('firmware_cleanup')
node.save()
@METRICS.timer('RedfishManagement._query_firmware_update_failed')
@@ -1012,6 +1026,56 @@ class RedfishManagement(base.ManagementInterface):
{'node': node.uuid,
'firmware_image': current_update['url']})
+ def _stage_firmware_file(self, node, firmware_update):
+ """Stage firmware update according to configuration.
+
+ :param node: Node for which to stage the firmware file
+ :param firmware_update: Firmware update to stage
+ :returns: Tuple of the staged URL and the source that needs cleanup
+ of staged files afterwards. If no staging is needed, returns the
+ original URL and None for the source.
+ :raises IronicException: If something goes wrong with staging.
+ """
+ try:
+ url = firmware_update['url']
+ parsed_url = urlparse(url)
+ scheme = parsed_url.scheme.lower()
+ source = (firmware_update.get('source')
+ or CONF.redfish.firmware_source).lower()
+
+ # Keep it simple; TLS does not matter for further processing
+ if scheme == 'https':
+ scheme = 'http'
+
+ # If both source and scheme are HTTP, no staging is needed;
+ # return the original location
+ if scheme == 'http' and source == scheme:
+ LOG.debug('For node %(node)s serving firmware from original '
+ 'location %(url)s', {'node': node.uuid, 'url': url})
+ return url, None
+
+ # If both source and scheme are Swift, the file is not moved;
+ # return a Swift temporary URL instead
+ if scheme == 'swift' and source == scheme:
+ temp_url = firmware_utils.get_swift_temp_url(parsed_url)
+ LOG.debug('For node %(node)s serving original firmware at '
+ '%(url)s via Swift temporary url %(temp_url)s',
+ {'node': node.uuid, 'url': url,
+ 'temp_url': temp_url})
+ return temp_url, None
+
+ # For the remaining cases, download the image to a temporary location
+ temp_file = firmware_utils.download_to_temp(node, url)
+
+ firmware_utils.verify_checksum(
+ node, firmware_update.get('checksum'), temp_file)
+
+ return firmware_utils.stage(node, source, temp_file)
+
+ except exception.IronicException as error:
+ firmware_utils.cleanup(node)
+ raise error
+
def get_secure_boot_state(self, task):
"""Get the current secure boot state for the node.
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index aa4294497..ab9a3589e 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import itertools
import math
from ironic_lib import metrics_utils
@@ -700,32 +701,6 @@ class RedfishRAID(base.RAIDInterface):
"""
return redfish_utils.COMMON_PROPERTIES.copy()
- def _validate_vendor(self, task):
- vendor = task.node.properties.get('vendor')
- if not vendor:
- return
-
- if 'dell' in vendor.lower().split():
- raise exception.InvalidParameterValue(
- _("The %(iface)s raid interface is not suitable for node "
- "%(node)s with vendor %(vendor)s, use idrac-redfish instead")
- % {'iface': task.node.get_interface('raid'),
- 'node': task.node.uuid, 'vendor': vendor})
-
- def validate(self, task):
- """Validates the RAID Interface.
-
- This method validates the properties defined by Ironic for RAID
- configuration. Driver implementations of this interface can override
- this method for doing more validations (such as BMC's credentials).
-
- :param task: A TaskManager instance.
- :raises: InvalidParameterValue, if the RAID configuration is invalid.
- :raises: MissingParameterValue, if some parameters are missing.
- """
- self._validate_vendor(task)
- super(RedfishRAID, self).validate(task)
-
def validate_raid_config(self, task, raid_config):
"""Validates the given RAID configuration.
@@ -817,31 +792,14 @@ class RedfishRAID(base.RAIDInterface):
logical_disks_to_create = self.pre_create_configuration(
task, logical_disks_to_create)
- reboot_required = False
- raid_configs = list()
- for logical_disk in logical_disks_to_create:
- raid_config = dict()
- response = create_virtual_disk(
- task,
- raid_controller=logical_disk.get('controller'),
- physical_disks=logical_disk['physical_disks'],
- raid_level=logical_disk['raid_level'],
- size_bytes=logical_disk['size_bytes'],
- disk_name=logical_disk.get('name'),
- span_length=logical_disk.get('span_length'),
- span_depth=logical_disk.get('span_depth'),
- error_handler=self.volume_create_error_handler)
- # only save the async tasks (task_monitors) in raid_config
- if (response is not None
- and hasattr(response, 'task_monitor_uri')):
- raid_config['operation'] = 'create'
- raid_config['raid_controller'] = logical_disk.get(
- 'controller')
- raid_config['task_monitor_uri'] = response.task_monitor_uri
- reboot_required = True
- raid_configs.append(raid_config)
-
- node.set_driver_internal_info('raid_configs', raid_configs)
+ # Group logical disks by controller
+ def gb_key(x):
+ return x.get('controller')
+ gb_list = itertools.groupby(
+ sorted(logical_disks_to_create, key=gb_key), gb_key)
+ ld_grouped = {k: list(g) for k, g in gb_list}
+ raid_configs, reboot_required = self._submit_create_configuration(
+ task, ld_grouped)
return_state = None
deploy_utils.set_async_step_flags(
@@ -866,53 +824,8 @@ class RedfishRAID(base.RAIDInterface):
complete.
"""
node = task.node
- system = redfish_utils.get_system(node)
- vols_to_delete = []
- try:
- for storage in system.storage.get_members():
- controller = (storage.storage_controllers[0]
- if storage.storage_controllers else None)
- controller_id = None
- if controller:
- controller_id = storage.identity
- for volume in storage.volumes.get_members():
- if (volume.raid_type or volume.volume_type not in
- [None, sushy.VOLUME_TYPE_RAW_DEVICE]):
- vols_to_delete.append((storage.volumes, volume,
- controller_id))
- except sushy.exceptions.SushyError as exc:
- error_msg = _('Cannot get the list of volumes to delete for node '
- '%(node_uuid)s. Reason: %(error)s.' %
- {'node_uuid': node.uuid, 'error': exc})
- LOG.error(error_msg)
- raise exception.RedfishError(error=exc)
-
- self.pre_delete_configuration(task, vols_to_delete)
-
- reboot_required = False
- raid_configs = list()
- for vol_coll, volume, controller_id in vols_to_delete:
- raid_config = dict()
- apply_time = None
- apply_time_support = vol_coll.operation_apply_time_support
- if (apply_time_support
- and apply_time_support.mapped_supported_values):
- supported_values = apply_time_support.mapped_supported_values
- if sushy.APPLY_TIME_IMMEDIATE in supported_values:
- apply_time = sushy.APPLY_TIME_IMMEDIATE
- elif sushy.APPLY_TIME_ON_RESET in supported_values:
- apply_time = sushy.APPLY_TIME_ON_RESET
- response = volume.delete(apply_time=apply_time)
- # only save the async tasks (task_monitors) in raid_config
- if (response is not None
- and hasattr(response, 'task_monitor_uri')):
- raid_config['operation'] = 'delete'
- raid_config['raid_controller'] = controller_id
- raid_config['task_monitor_uri'] = response.task_monitor_uri
- reboot_required = True
- raid_configs.append(raid_config)
-
- node.set_driver_internal_info('raid_configs', raid_configs)
+ raid_configs, reboot_required = self._submit_delete_configuration(
+ task)
return_state = None
deploy_utils.set_async_step_flags(
@@ -1041,77 +954,211 @@ class RedfishRAID(base.RAIDInterface):
"""Periodic job to check RAID config tasks."""
self._check_node_raid_config(task)
- def _get_error_messages(self, response):
- try:
- body = response.json()
- except ValueError:
- return []
- else:
- error = body.get('error', {})
- code = error.get('code', '')
- message = error.get('message', code)
- ext_info = error.get('@Message.ExtendedInfo', [{}])
- messages = [m.get('Message') for m in ext_info if 'Message' in m]
- if not messages and message:
- messages = [message]
- return messages
-
- def _raid_config_in_progress(self, task, raid_config):
- """Check if this RAID configuration operation is still in progress."""
- task_monitor_uri = raid_config['task_monitor_uri']
+ def _raid_config_in_progress(self, task, task_monitor_uri, operation):
+ """Check if this RAID configuration operation is still in progress.
+
+ :param task: TaskManager object containing the node.
+ :param task_monitor_uri: Redfish task monitor URI
+ :param operation: 'create' or 'delete' operation for given task.
+ Used in log messages.
+ :returns: True, if still in progress, otherwise False.
+ """
try:
task_monitor = redfish_utils.get_task_monitor(task.node,
task_monitor_uri)
except exception.RedfishError:
- LOG.info('Unable to get status of RAID %(operation)s task to node '
- '%(node_uuid)s; assuming task completed successfully',
- {'operation': raid_config['operation'],
+ LOG.info('Unable to get status of RAID %(operation)s task '
+ '%(task_mon_uri)s to node %(node_uuid)s; assuming task '
+ 'completed successfully',
+ {'operation': operation,
+ 'task_mon_uri': task_monitor_uri,
'node_uuid': task.node.uuid})
return False
if task_monitor.is_processing:
- LOG.debug('RAID %(operation)s task %(task_mon)s to node '
+ LOG.debug('RAID %(operation)s task %(task_mon_uri)s to node '
'%(node_uuid)s still in progress',
- {'operation': raid_config['operation'],
- 'task_mon': task_monitor.task_monitor_uri,
+ {'operation': operation,
+ 'task_mon_uri': task_monitor.task_monitor_uri,
'node_uuid': task.node.uuid})
return True
else:
- response = task_monitor.response
- if response is not None:
- status_code = response.status_code
- if status_code >= 400:
- messages = self._get_error_messages(response)
- LOG.error('RAID %(operation)s task to node '
- '%(node_uuid)s failed with status '
- '%(status_code)s; messages: %(messages)s',
- {'operation': raid_config['operation'],
- 'node_uuid': task.node.uuid,
- 'status_code': status_code,
- 'messages': ", ".join(messages)})
- else:
- LOG.info('RAID %(operation)s task to node '
- '%(node_uuid)s completed with status '
- '%(status_code)s',
- {'operation': raid_config['operation'],
- 'node_uuid': task.node.uuid,
- 'status_code': status_code})
+ sushy_task = task_monitor.get_task()
+ messages = []
+ if sushy_task.messages and not sushy_task.messages[0].message:
+ sushy_task.parse_messages()
+
+ messages = [m.message for m in sushy_task.messages]
+
+ if (sushy_task.task_state == sushy.TASK_STATE_COMPLETED
+ and sushy_task.task_status in
+ [sushy.HEALTH_OK, sushy.HEALTH_WARNING]):
+ LOG.info('RAID %(operation)s task %(task_mon_uri)s to node '
+ '%(node_uuid)s completed.',
+ {'operation': operation,
+ 'task_mon_uri': task_monitor.task_monitor_uri,
+ 'node_uuid': task.node.uuid})
+ else:
+ LOG.error('RAID %(operation)s task %(task_mon_uri)s to node '
+ '%(node_uuid)s failed; messages: %(messages)s',
+ {'operation': operation,
+ 'task_mon_uri': task_monitor.task_monitor_uri,
+ 'node_uuid': task.node.uuid,
+ 'messages': ", ".join(messages)})
return False
@METRICS.timer('RedfishRAID._check_node_raid_config')
def _check_node_raid_config(self, task):
- """Check the progress of running RAID config on a node."""
+ """Check the progress of running RAID config on a node.
+
+ :param task: TaskManager object containing the node.
+ """
node = task.node
raid_configs = node.driver_internal_info['raid_configs']
task.upgrade_lock()
- raid_configs[:] = [i for i in raid_configs
- if self._raid_config_in_progress(task, i)]
-
- if not raid_configs:
- self._clear_raid_configs(node)
- LOG.info('RAID configuration completed for node %(node)s',
- {'node': node.uuid})
- if task.node.clean_step:
- manager_utils.notify_conductor_resume_clean(task)
+ raid_configs['task_monitor_uri'] =\
+ [i for i in raid_configs.get('task_monitor_uri')
+ if self._raid_config_in_progress(
+ task, i, raid_configs.get('operation'))]
+ node.set_driver_internal_info('raid_configs', raid_configs)
+
+ if not raid_configs['task_monitor_uri']:
+ if raid_configs.get('pending'):
+ if raid_configs.get('operation') == 'create':
+ reboot_required = self._submit_create_configuration(
+ task, raid_configs.get('pending'))[1]
+ else:
+ reboot_required = self._submit_delete_configuration(
+ task)[1]
+ if reboot_required:
+ deploy_utils.reboot_to_finish_step(task)
else:
- manager_utils.notify_conductor_resume_deploy(task)
+ self._clear_raid_configs(node)
+ LOG.info('RAID configuration completed for node %(node)s',
+ {'node': node.uuid})
+ if task.node.clean_step:
+ manager_utils.notify_conductor_resume_clean(task)
+ else:
+ manager_utils.notify_conductor_resume_deploy(task)
+
+ def _submit_create_configuration(self, task, ld_grouped):
+ """Processes and submits requests for creating RAID configuration.
+
+ :param task: TaskManager object containing the node.
+ :param ld_grouped: Dictionary of logical disks, grouped by controller.
+
+ :returns: tuple of 1) dictionary containing operation name (create),
+ pending items, and task monitor URIs, and 2) flag indicating if
+ reboot is required.
+ """
+ node = task.node
+ reboot_required = False
+ raid_configs = {'operation': 'create', 'pending': {}}
+ for controller, logical_disks in ld_grouped.items():
+ iter_logical_disks = iter(logical_disks)
+ for logical_disk in iter_logical_disks:
+ response = create_virtual_disk(
+ task,
+ raid_controller=logical_disk.get('controller'),
+ physical_disks=logical_disk['physical_disks'],
+ raid_level=logical_disk['raid_level'],
+ size_bytes=logical_disk['size_bytes'],
+ disk_name=logical_disk.get('name'),
+ span_length=logical_disk.get('span_length'),
+ span_depth=logical_disk.get('span_depth'),
+ error_handler=self.volume_create_error_handler)
+ if (response is not None
+ and hasattr(response, 'task_monitor_uri')):
+ raid_configs.setdefault('task_monitor_uri', []).append(
+ response.task_monitor_uri)
+ reboot_required = True
+ # Don't process any more disks on this controller until these
+ # are created, to avoid failures where only one request per
+ # controller can be submitted for non-immediate apply times
+ break
+ # Append remaining disks for this controller, if any are left
+ for logical_disk in iter_logical_disks:
+ raid_configs['pending'].setdefault(controller, []).append(
+ logical_disk)
+
+ node.set_driver_internal_info('raid_configs', raid_configs)
+
+ return raid_configs, reboot_required
+
+ def _submit_delete_configuration(self, task):
+ """Processes and submits requests for deleting virtual disks.
+
+ :param task: TaskManager object containing the node.
+
+ :returns: tuple of 1) dictionary containing operation name (delete),
+ flag to indicate if any disks remain, and task monitor URIs,
+ and 2) flag indicating if a reboot is required
+ :raises RedfishError: if fails to get list of virtual disks
+ """
+ node = task.node
+ system = redfish_utils.get_system(node)
+ vols_to_delete = {}
+ any_left = False
+ try:
+ for storage in system.storage.get_members():
+ controller = (storage.storage_controllers[0]
+ if storage.storage_controllers else None)
+ controller_id = None
+ if controller:
+ controller_id = storage.identity
+ iter_volumes = iter(storage.volumes.get_members())
+ for volume in iter_volumes:
+ if (volume.raid_type or volume.volume_type not in
+ [None, sushy.VOLUME_TYPE_RAW_DEVICE]):
+ if controller_id not in vols_to_delete:
+ vols_to_delete[controller_id] = []
+ apply_time = self._get_apply_time(
+ storage.volumes.operation_apply_time_support)
+ vols_to_delete[controller_id].append((
+ apply_time, volume))
+ if apply_time == sushy.APPLY_TIME_ON_RESET:
+ # Don't process any more volumes on this controller until
+ # these are deleted, to avoid failures where only one request
+ # per controller can be submitted for non-immediate apply times
+ break
+ any_left = any(iter_volumes)
+ except sushy.exceptions.SushyError as exc:
+ error_msg = _('Cannot get the list of volumes to delete for node '
+ '%(node_uuid)s. Reason: %(error)s.' %
+ {'node_uuid': node.uuid, 'error': exc})
+ LOG.error(error_msg)
+ raise exception.RedfishError(error=exc)
+
+ self.pre_delete_configuration(task, vols_to_delete)
+
+ reboot_required = False
+ raid_configs = {'operation': 'delete', 'pending': any_left}
+ for controller, vols_to_delete in vols_to_delete.items():
+ for apply_time, volume in vols_to_delete:
+ response = volume.delete(apply_time=apply_time)
+ # only save the async tasks (task_monitors) in raid_config
+ if (response is not None
+ and hasattr(response, 'task_monitor_uri')):
+ raid_configs.setdefault('task_monitor_uri', []).append(
+ response.task_monitor_uri)
+ reboot_required = True
+
+ node.set_driver_internal_info('raid_configs', raid_configs)
+
+ return raid_configs, reboot_required
+
+ def _get_apply_time(self, apply_time_support):
+ """Gets apply time for RAID operations
+
+ :param apply_time_support: Supported apply times
+ :returns: None if supported apply times are not specified. Otherwise
+ Immediate when available, or OnReset, which requires a reboot.
+ """
+ apply_time = None
+ if apply_time_support and apply_time_support.mapped_supported_values:
+ supported_values = apply_time_support.mapped_supported_values
+ if sushy.APPLY_TIME_IMMEDIATE in supported_values:
+ apply_time = sushy.APPLY_TIME_IMMEDIATE
+ elif sushy.APPLY_TIME_ON_RESET in supported_values:
+ apply_time = sushy.APPLY_TIME_ON_RESET
+ return apply_time
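The controller grouping that feeds _submit_create_configuration is plain itertools.groupby over a sorted list. A standalone illustration of the same idiom (controller names are made up):

    import itertools

    logical_disks = [
        {'controller': 'RAID.Integrated.1-1', 'raid_level': '1'},
        {'controller': 'RAID.Slot.2-1', 'raid_level': '0'},
        {'controller': 'RAID.Integrated.1-1', 'raid_level': '5'},
    ]

    def gb_key(x):
        return x.get('controller')

    # groupby only merges adjacent items, hence the sort by the same key.
    ld_grouped = {k: list(g)
                  for k, g in itertools.groupby(
                      sorted(logical_disks, key=gb_key), gb_key)}
    # -> two disks under 'RAID.Integrated.1-1', one under 'RAID.Slot.2-1'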
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index a200b6717..4e700c6f8 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -230,7 +230,10 @@ class SNMPClient(object):
object depending on SNMP version being used.
"""
if self.version == SNMP_V3:
- return snmp.UsmUserData(
+ # NOTE(TheJulia): Ignore Bandit error B509 argument parsing as
+ # the check is for a count of <3 arguments, however our line
+ # wrapping causes the check to trigger.
+ return snmp.UsmUserData( # nosec B509
self.user,
authKey=self.auth_key,
authProtocol=self.auth_proto,
diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py
index b90660b59..5e1596eb4 100644
--- a/ironic/drivers/utils.py
+++ b/ironic/drivers/utils.py
@@ -23,6 +23,7 @@ from oslo_utils import timeutils
from ironic.common import exception
from ironic.common.i18n import _
+from ironic.common import states
from ironic.common import swift
from ironic.conductor import utils
from ironic.drivers import base
@@ -449,3 +450,23 @@ def get_agent_kernel_ramdisk(node, mode='deploy', deprecated_prefix=None):
def get_agent_iso(node, mode='deploy', deprecated_prefix=None):
"""Get the agent ISO image."""
return get_field(node, f'{mode}_iso', deprecated_prefix)
+
+
+def need_prepare_ramdisk(node):
+ """Check if node needs preparing ramdisk
+
+ :param node: Node to check
+ :returns: True if the ramdisk needs to be prepared, otherwise False
+ """
+ # NOTE(TheJulia): If the current provision state is something aside
+ # from deployment, cleaning, rescue or inspection, such as conductor
+ # takeover, we should treat this as a no-op and move on; otherwise we
+ # would modify the state of the node due to virtual media operations.
+ return node.provision_state in (states.DEPLOYING,
+ states.DEPLOYWAIT,
+ states.CLEANING,
+ states.CLEANWAIT,
+ states.RESCUING,
+ states.RESCUEWAIT,
+ states.INSPECTING,
+ states.INSPECTWAIT)
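With this helper in place, the boot interfaces earlier in this diff collapse their duplicated provision-state guard to a two-line prologue:

    # Pattern now shared by the ilo, irmc and redfish virtual media
    # boot interfaces (see the hunks earlier in this diff):
    if not driver_utils.need_prepare_ramdisk(task.node):
        return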
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index cdb218301..7c6c8bc1a 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -729,6 +729,18 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
"""
self.set_driver_internal_info(key, timeutils.utcnow().isoformat())
+ def set_instance_info(self, key, value):
+ """Set an `instance_info` value.
+
+ Setting an `instance_info` dict value via this method ensures that the
+ field will be flagged for saving.
+
+ :param key: Key of item to set
+ :param value: Value of item to set
+ """
+ self.instance_info[key] = value
+ self._changed_fields.add('instance_info')
+
@base.IronicObjectRegistry.register
class NodePayload(notification.NotificationPayloadBase):
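A short sketch of why the new helper matters: mutating the instance_info dict in place does not register the field in _changed_fields, so the change may not be persisted on save(), whereas the helper flags the field explicitly (usage assumed, mirroring the existing set_driver_internal_info pattern):

    # In-place mutation; the field is not flagged as changed:
    #     node.instance_info['foo'] = 'bar'
    # With the helper, instance_info lands in _changed_fields and is saved:
    node.set_instance_info('foo', 'bar')
    node.save()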
diff --git a/ironic/tests/unit/api/controllers/v1/test_allocation.py b/ironic/tests/unit/api/controllers/v1/test_allocation.py
index 367c06350..0bc739bf2 100644
--- a/ironic/tests/unit/api/controllers/v1/test_allocation.py
+++ b/ironic/tests/unit/api/controllers/v1/test_allocation.py
@@ -192,7 +192,9 @@ class TestListAllocations(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/allocations', data['next'])
+ self.assertIn('limit=3', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -207,7 +209,10 @@ class TestListAllocations(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/allocations', data['next'])
+ # FIXME(dtantsur): IMO this should not pass, but it does now
+ self.assertIn('limit=3', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_custom_fields(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -227,8 +232,9 @@ class TestListAllocations(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['allocations']))
next_marker = data['allocations'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
- self.assertIn('fields', data['next'])
+ self.assertIn('/allocations', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
+ self.assertIn(f'fields={fields}', data['next'])
def test_get_collection_pagination_no_uuid(self):
fields = 'node_uuid'
diff --git a/ironic/tests/unit/api/controllers/v1/test_conductor.py b/ironic/tests/unit/api/controllers/v1/test_conductor.py
index caf85eb4c..d5e54ee1b 100644
--- a/ironic/tests/unit/api/controllers/v1/test_conductor.py
+++ b/ironic/tests/unit/api/controllers/v1/test_conductor.py
@@ -188,7 +188,9 @@ class TestListConductors(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['conductors']))
next_marker = data['conductors'][-1]['hostname']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/conductors', data['next'])
+ self.assertIn('limit=3', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -204,7 +206,8 @@ class TestListConductors(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['conductors']))
next_marker = data['conductors'][-1]['hostname']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/conductors', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_custom_fields(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -221,8 +224,9 @@ class TestListConductors(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['conductors']))
next_marker = data['conductors'][-1]['hostname']
- self.assertIn(next_marker, data['next'])
- self.assertIn('fields', data['next'])
+ self.assertIn('/conductors', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
+ self.assertIn(f'fields={fields}', data['next'])
def test_sort_key(self):
conductors = []
diff --git a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
index ed7239d5c..b86fb0b1d 100644
--- a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
+++ b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
@@ -210,7 +210,9 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/deploy_templates', data['next'])
+ self.assertIn('limit=3', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -224,7 +226,8 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
+ self.assertIn('/deploy_templates', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
def test_collection_links_custom_fields(self):
cfg.CONF.set_override('max_limit', 3, 'api')
@@ -240,8 +243,9 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
headers=self.headers)
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
- self.assertIn(next_marker, data['next'])
- self.assertIn('fields', data['next'])
+ self.assertIn('/deploy_templates', data['next'])
+ self.assertIn(f'marker={next_marker}', data['next'])
+ self.assertIn(f'fields={fields}', data['next'])
def test_get_collection_pagination_no_uuid(self):
fields = 'name'
@@ -259,6 +263,7 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
headers=self.headers)
self.assertEqual(limit, len(data['deploy_templates']))
+ self.assertIn('/deploy_templates', data['next'])
self.assertIn('marker=%s' % templates[limit - 1].uuid, data['next'])
def test_sort_key(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index ee957178c..3c913834e 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -7841,6 +7841,10 @@ class TestNodeHistory(test_api_base.BaseApiTest):
self.assertEqual(1, len(entries))
result_uuid = entries[0]['uuid']
self.assertEqual(self.event1.uuid, result_uuid)
+ self.assertIn('next', ret)
+ self.assertIn('nodes/%s/history' % self.node.uuid, ret['next'])
+ self.assertIn('limit=1', ret['next'])
+ self.assertIn('marker=%s' % result_uuid, ret['next'])
# Second request
ret = self.get_json('/nodes/%s/history?limit=1&marker=%s' %
(self.node.uuid, result_uuid),
@@ -7850,6 +7854,9 @@ class TestNodeHistory(test_api_base.BaseApiTest):
self.assertEqual(1, len(entries))
result_uuid = entries[0]['uuid']
self.assertEqual(self.event2.uuid, result_uuid)
+ self.assertIn('nodes/%s/history' % self.node.uuid, ret['next'])
+ self.assertIn('limit=1', ret['next'])
+ self.assertIn('marker=%s' % result_uuid, ret['next'])
# Third request
ret = self.get_json('/nodes/%s/history?limit=1&marker=%s' %
(self.node.uuid, result_uuid),
@@ -7859,3 +7866,6 @@ class TestNodeHistory(test_api_base.BaseApiTest):
self.assertEqual(1, len(entries))
result_uuid = entries[0]['uuid']
self.assertEqual(self.event3.uuid, result_uuid)
+ self.assertIn('nodes/%s/history' % self.node.uuid, ret['next'])
+ self.assertIn('limit=1', ret['next'])
+ self.assertIn('marker=%s' % result_uuid, ret['next'])
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 12208e049..6823c3b51 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -194,7 +194,8 @@ class TestPortsController__GetPortsCollection(base.TestCase):
mock_request.context = 'fake-context'
mock_list.return_value = []
self.controller._get_ports_collection(None, None, None, None, None,
- None, 'asc')
+ None, 'asc',
+ resource_url='ports')
mock_list.assert_called_once_with('fake-context', 1000, None,
project=None, sort_dir='asc',
sort_key=None)
@@ -1104,7 +1105,7 @@ class TestListPorts(test_api_base.BaseApiTest):
autospec=True)
def test_detail_with_incorrect_api_usage(self, mock_gpc):
mock_gpc.return_value = api_port.list_convert_with_links(
- [], 0)
+ [], 0, 'port')
# GET /v1/ports/detail specifying node and node_uuid. In this case
# we expect the node_uuid interface to be used.
self.get_json('/ports/detail?node=%s&node_uuid=%s' %
diff --git a/ironic/tests/unit/common/test_glance_service.py b/ironic/tests/unit/common/test_glance_service.py
index 09f64a00f..6be0fccd9 100644
--- a/ironic/tests/unit/common/test_glance_service.py
+++ b/ironic/tests/unit/common/test_glance_service.py
@@ -605,6 +605,62 @@ class TestGlanceSwiftTempURL(base.TestCase):
method='GET')
swift_mock.assert_called_once_with()
+ @mock.patch('ironic.common.swift.get_swift_session', autospec=True)
+ @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
+ def test_swift_temp_url_account_detected_with_prefix(self, tempurl_mock,
+ swift_mock):
+ self.config(swift_account=None, group='glance')
+ self.config(swift_account_prefix='SWIFTPREFIX', group='glance')
+
+ path = ('/v1/SWIFTPREFIX_42/glance'
+ '/757274c4-2856-4bd2-bb20-9a4a231e187b')
+ tempurl_mock.return_value = (
+ path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
+ auth_ref = swift_mock.return_value.auth.get_auth_ref.return_value
+ auth_ref.project_id = '42'
+
+ self.service._validate_temp_url_config = mock.Mock()
+
+ temp_url = self.service.swift_temp_url(image_info=self.fake_image)
+
+ self.assertEqual(CONF.glance.swift_endpoint_url
+ + tempurl_mock.return_value,
+ temp_url)
+ tempurl_mock.assert_called_with(
+ path=path,
+ seconds=CONF.glance.swift_temp_url_duration,
+ key=CONF.glance.swift_temp_url_key,
+ method='GET')
+ swift_mock.assert_called_once_with()
+
+ @mock.patch('ironic.common.swift.get_swift_session', autospec=True)
+ @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
+ def test_swift_temp_url_account_detected_with_prefix_underscore(
+ self, tempurl_mock, swift_mock):
+ self.config(swift_account=None, group='glance')
+ self.config(swift_account_prefix='SWIFTPREFIX_', group='glance')
+
+ path = ('/v1/SWIFTPREFIX_42/glance'
+ '/757274c4-2856-4bd2-bb20-9a4a231e187b')
+ tempurl_mock.return_value = (
+ path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
+ auth_ref = swift_mock.return_value.auth.get_auth_ref.return_value
+ auth_ref.project_id = '42'
+
+ self.service._validate_temp_url_config = mock.Mock()
+
+ temp_url = self.service.swift_temp_url(image_info=self.fake_image)
+
+ self.assertEqual(CONF.glance.swift_endpoint_url
+ + tempurl_mock.return_value,
+ temp_url)
+ tempurl_mock.assert_called_with(
+ path=path,
+ seconds=CONF.glance.swift_temp_url_duration,
+ key=CONF.glance.swift_temp_url_key,
+ method='GET')
+ swift_mock.assert_called_once_with()
+
@mock.patch('ironic.common.swift.SwiftAPI', autospec=True)
@mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
def test_swift_temp_url_key_detected(self, tempurl_mock, swift_mock):
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index 9892d671c..fe6b67ad3 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -218,6 +218,18 @@ class IronicImagesTestCase(base.TestCase):
@mock.patch.object(images, 'get_image_properties', autospec=True)
@mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
+ def test_is_whole_disk_image_explicit(self, mock_igi, mock_gip):
+ for value, result in [(images.IMAGE_TYPE_PARTITION, False),
+ (images.IMAGE_TYPE_WHOLE_DISK, True)]:
+ instance_info = {'image_source': 'glance://partition_image',
+ 'image_type': value}
+ iwdi = images.is_whole_disk_image('context', instance_info)
+ self.assertIs(iwdi, result)
+ self.assertFalse(mock_igi.called)
+ self.assertFalse(mock_gip.called)
+
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_partition_image(self, mock_igi, mock_gip):
mock_igi.return_value = True
mock_gip.return_value = {'kernel_id': 'kernel',
@@ -232,6 +244,20 @@ class IronicImagesTestCase(base.TestCase):
@mock.patch.object(images, 'get_image_properties', autospec=True)
@mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
+ def test_is_whole_disk_image_partition_image_with_type(self, mock_igi,
+ mock_gip):
+ mock_igi.return_value = True
+ mock_gip.return_value = {'img_type': images.IMAGE_TYPE_PARTITION}
+ instance_info = {'image_source': 'glance://partition_image'}
+ image_source = instance_info['image_source']
+ is_whole_disk_image = images.is_whole_disk_image('context',
+ instance_info)
+ self.assertFalse(is_whole_disk_image)
+ mock_igi.assert_called_once_with(image_source)
+ mock_gip.assert_called_once_with('context', image_source)
+
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_whole_disk_image(self, mock_igi, mock_gip):
mock_igi.return_value = True
mock_gip.return_value = {}
@@ -295,20 +321,22 @@ class FsImageTestCase(base.TestCase):
mkdir_mock.assert_any_call('root_dir', exist_ok=True)
mkdir_mock.assert_any_call('root_dir/sub_dir', exist_ok=True)
+ @mock.patch.object(os, 'listdir', autospec=True)
@mock.patch.object(images, '_create_root_fs', autospec=True)
@mock.patch.object(utils, 'tempdir', autospec=True)
@mock.patch.object(utils, 'write_to_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_vfat_image(
self, execute_mock, write_mock,
- tempdir_mock, create_root_fs_mock):
+ tempdir_mock, create_root_fs_mock, os_listdir_mock):
mock_file_handle = mock.MagicMock(spec=io.BytesIO)
- mock_file_handle.__enter__.return_value = 'tempdir'
+ mock_file_handle.__enter__.return_value = '/tempdir'
tempdir_mock.return_value = mock_file_handle
parameters = {'p1': 'v1'}
files_info = {'a': 'b'}
+ os_listdir_mock.return_value = ['b', 'qwe']
images.create_vfat_image('tgt_file', parameters=parameters,
files_info=files_info, parameters_file='qwe',
fs_size_kib=1000)
@@ -316,13 +344,15 @@ class FsImageTestCase(base.TestCase):
execute_mock.assert_has_calls([
mock.call('dd', 'if=/dev/zero', 'of=tgt_file', 'count=1',
'bs=1000KiB'),
- mock.call('mkfs', '-t', 'vfat', '-n', 'ir-vfd-de', 'tgt_file'),
- mock.call('mcopy', '-s', 'tempdir/*', '-i', 'tgt_file', '::')
+ mock.call('mkfs', '-t', 'vfat', '-n', 'ir-vfd-dev', 'tgt_file'),
+ mock.call('mcopy', '-s', '/tempdir/b', '/tempdir/qwe', '-i',
+ 'tgt_file', '::')
])
- parameters_file_path = os.path.join('tempdir', 'qwe')
+ parameters_file_path = os.path.join('/tempdir', 'qwe')
write_mock.assert_called_once_with(parameters_file_path, 'p1=v1')
- create_root_fs_mock.assert_called_once_with('tempdir', files_info)
+ create_root_fs_mock.assert_called_once_with('/tempdir', files_info)
+ os_listdir_mock.assert_called_once_with('/tempdir')
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_vfat_image_dd_fails(self, execute_mock):
diff --git a/ironic/tests/unit/common/test_kickstart_utils.py b/ironic/tests/unit/common/test_kickstart_utils.py
index fffacf7d4..0dd1ac572 100644
--- a/ironic/tests/unit/common/test_kickstart_utils.py
+++ b/ironic/tests/unit/common/test_kickstart_utils.py
@@ -114,7 +114,7 @@ echo $CONTENT | /usr/bin/base64 --decode > {file_path}\n\
expected = self._get_expected_ks_config_drive(self.config_drive_dict)
with task_manager.acquire(self.context, self.node.uuid) as task:
i_info = task.node.instance_info
- i_info['configdrive'] = self.config_drive_dict
+ i_info['configdrive'] = CONFIG_DRIVE
task.node.instance_info = i_info
task.node.save()
self.assertEqual(expected, ks_utils.prepare_config_drive(task))
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index ef1e5d1f3..5ba0033a7 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -128,6 +128,10 @@ class TestPXEUtils(db_base.DbTestCase):
'boot_from_iso': True,
'boot_iso_url': 'http://1.2.3.4:1234/uuid/iso'
})
+ self.ipxe_options_boot_from_ramdisk = self.ipxe_options.copy()
+ self.ipxe_options_boot_from_ramdisk.update({
+ 'ramdisk_kernel_arguments': 'ramdisk_params'
+ })
self.node = object_utils.create_test_node(self.context)
@@ -290,6 +294,27 @@ class TestPXEUtils(db_base.DbTestCase):
expected_template = f.read().rstrip()
self.assertEqual(str(expected_template), rendered_template)
+ def test_default_ipxe_boot_from_ramdisk(self):
+ self.config(
+ pxe_config_template='ironic/drivers/modules/ipxe_config.template',
+ group='pxe'
+ )
+ self.config(http_url='http://1.2.3.4:1234', group='deploy')
+
+ pxe_options = self.ipxe_options_boot_from_ramdisk
+
+ rendered_template = utils.render_template(
+ CONF.pxe.pxe_config_template,
+ {'pxe_options': pxe_options,
+ 'ROOT': '{{ ROOT }}'},
+ )
+
+ templ_file = 'ironic/tests/unit/drivers/' \
+ 'ipxe_config_boot_from_ramdisk.template'
+ with open(templ_file) as f:
+ expected_template = f.read().rstrip()
+ self.assertEqual(str(expected_template), rendered_template)
+
def test_default_grub_config(self):
pxe_opts = self.pxe_options
pxe_opts['boot_mode'] = 'uefi'
@@ -1489,6 +1514,7 @@ class PXEBuildKickstartConfigOptionsTestCase(db_base.DbTestCase):
shared=True) as task:
expected = {}
expected['liveimg_url'] = task.node.instance_info['image_url']
+ expected['config_drive'] = ''
expected['heartbeat_url'] = (
'http://ironic-api/v1/heartbeat/%s' % task.node.uuid
)
@@ -1571,7 +1597,8 @@ class PXEBuildConfigOptionsTestCase(db_base.DbTestCase):
whle_dsk_img=False,
debug=False, mode='deploy',
ramdisk_params=None,
- expected_pxe_params=None):
+ expected_pxe_params=None,
+ ramdisk_kernel_opt=None):
self.config(debug=debug)
self.config(kernel_append_params='test_param', group='pxe')
@@ -1633,6 +1660,8 @@ class PXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'ari_path': ramdisk,
'aki_path': kernel,
}
+ if ramdisk_kernel_opt:
+ expected_options.update({'ramdisk_opts': ramdisk_kernel_opt})
if mode == 'rescue':
self.node.provision_state = states.RESCUING
@@ -1658,6 +1687,10 @@ class PXEBuildConfigOptionsTestCase(db_base.DbTestCase):
del self.node.driver_internal_info['is_whole_disk_image']
self._test_build_pxe_config_options_pxe(debug=True, mode='rescue')
+ def test_build_pxe_config_options_pxe_opts_ramdisk_opt(self):
+ self.node.instance_info = {'ramdisk_kernel_arguments': 'cat meow'}
+ self._test_build_pxe_config_options_pxe(ramdisk_kernel_opt='cat meow')
+
def test_build_pxe_config_options_pxe_local_boot(self):
del self.node.driver_internal_info['is_whole_disk_image']
i_info = self.node.instance_info
@@ -1884,9 +1917,10 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
self.config(ipxe_use_swift=True, group='pxe')
glance = mock.Mock()
glance_mock.return_value = glance
- glance.swift_temp_url.side_effect = [
- pxe_kernel, pxe_ramdisk] = [
- 'swift_kernel', 'swift_ramdisk']
+ glance.swift_temp_url.side_effect = [pxe_kernel, pxe_ramdisk] = [
+ 'http://example.com/account/swift_kernel',
+ 'http://example.com/account/swift_ramdisk'
+ ]
image_info = {
kernel_label: (uuidutils.generate_uuid(),
os.path.join(root_dir,
@@ -1897,6 +1931,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
self.node.uuid,
ramdisk_label))
}
+ expected_initrd_filename = 'swift_ramdisk'
else:
pxe_kernel = os.path.join(http_url, self.node.uuid,
kernel_label)
@@ -1912,6 +1947,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
self.node.uuid,
ramdisk_label))
}
+ expected_initrd_filename = ramdisk_label
kernel = os.path.join(http_url, self.node.uuid, 'kernel')
ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
@@ -1946,7 +1982,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'ipxe_timeout': ipxe_timeout_in_ms,
'ari_path': ramdisk,
'aki_path': kernel,
- 'initrd_filename': ramdisk_label,
+ 'initrd_filename': expected_initrd_filename,
}
if mode == 'rescue':
diff --git a/ironic/tests/unit/common/test_rpc_service.py b/ironic/tests/unit/common/test_rpc_service.py
index 4e190f5e6..8483bfb22 100644
--- a/ironic/tests/unit/common/test_rpc_service.py
+++ b/ironic/tests/unit/common/test_rpc_service.py
@@ -46,6 +46,8 @@ class TestRPCService(base.TestCase):
mock_rpc, mock_ios, mock_target, mock_prepare_method):
mock_rpc.return_value.start = mock.MagicMock()
self.rpc_svc.handle_signal = mock.MagicMock()
+ self.assertFalse(self.rpc_svc._started)
+ self.assertFalse(self.rpc_svc._failure)
self.rpc_svc.start()
mock_ctx.assert_called_once_with()
mock_target.assert_called_once_with(topic=self.rpc_svc.topic,
@@ -55,6 +57,9 @@ class TestRPCService(base.TestCase):
mock_init_method.assert_called_once_with(self.rpc_svc.manager,
mock_ctx.return_value)
self.assertIs(rpc.GLOBAL_MANAGER, self.rpc_svc.manager)
+ self.assertTrue(self.rpc_svc._started)
+ self.assertFalse(self.rpc_svc._failure)
+ self.rpc_svc.wait_for_start() # should be no-op
@mock.patch.object(manager.ConductorManager, 'prepare_host', autospec=True)
@mock.patch.object(oslo_messaging, 'Target', autospec=True)
@@ -77,3 +82,29 @@ class TestRPCService(base.TestCase):
mock_init_method.assert_called_once_with(self.rpc_svc.manager,
mock_ctx.return_value)
self.assertIs(rpc.GLOBAL_MANAGER, self.rpc_svc.manager)
+
+ @mock.patch.object(manager.ConductorManager, 'prepare_host', autospec=True)
+ @mock.patch.object(oslo_messaging, 'Target', autospec=True)
+ @mock.patch.object(objects_base, 'IronicObjectSerializer', autospec=True)
+ @mock.patch.object(rpc, 'get_server', autospec=True)
+ @mock.patch.object(manager.ConductorManager, 'init_host', autospec=True)
+ @mock.patch.object(context, 'get_admin_context', autospec=True)
+ def test_start_failure(self, mock_ctx, mock_init_method, mock_rpc,
+ mock_ios, mock_target, mock_prepare_method):
+ mock_rpc.return_value.start = mock.MagicMock()
+ self.rpc_svc.handle_signal = mock.MagicMock()
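+ # Simulate init_host() blowing up partway through service start-up.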
+ mock_init_method.side_effect = RuntimeError("boom")
+ self.assertFalse(self.rpc_svc._started)
+ self.assertFalse(self.rpc_svc._failure)
+ self.assertRaises(RuntimeError, self.rpc_svc.start)
+ mock_ctx.assert_called_once_with()
+ mock_target.assert_called_once_with(topic=self.rpc_svc.topic,
+ server="fake_host")
+ mock_ios.assert_called_once_with(is_server=True)
+ mock_prepare_method.assert_called_once_with(self.rpc_svc.manager)
+ mock_init_method.assert_called_once_with(self.rpc_svc.manager,
+ mock_ctx.return_value)
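+ # After a failed start, no global manager may be registered and
+ # wait_for_start() should exit the process.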
+ self.assertIsNone(rpc.GLOBAL_MANAGER)
+ self.assertFalse(self.rpc_svc._started)
+ self.assertIn("boom", self.rpc_svc._failure)
+ self.assertRaises(SystemExit, self.rpc_svc.wait_for_start)
diff --git a/ironic/tests/unit/conductor/test_deployments.py b/ironic/tests/unit/conductor/test_deployments.py
index 65bb1eca1..d86292aa9 100644
--- a/ironic/tests/unit/conductor/test_deployments.py
+++ b/ironic/tests/unit/conductor/test_deployments.py
@@ -353,7 +353,10 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
deployments.start_deploy(task, self.service, configdrive=None,
event='deploy', deploy_steps=deploy_steps)
node.refresh()
- self.assertTrue(mock_iwdi.called)
+ mock_iwdi.assert_called_once_with(task.context,
+ task.node.instance_info)
+ self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
+ self.assertEqual('partition', node.instance_info['image_type'])
mock_power_validate.assert_called_once_with(task.driver.power, task)
mock_deploy_validate.assert_called_once_with(task.driver.deploy, task)
mock_validate_traits.assert_called_once_with(task.node)
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index dee5b6974..f49633364 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -1859,12 +1859,15 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# exc_info[1]
self.assertIn(r'node 1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
str(exc.exc_info[1]))
+ node.refresh()
# This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
- self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
+ # The image type must be set for validation to actually work
+ self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
+ self.assertEqual('partition', node.instance_info['image_type'])
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate',
autospec=True)
@@ -3410,6 +3413,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
self.context, driver='fake-hardware',
target_raid_config=target_raid_config,
network_interface='noop')
+ expected_info = dict(node.instance_info, image_type='partition')
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
expected = {'console': {'result': True},
@@ -3424,7 +3428,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
'rescue': {'result': True},
'bios': {'result': True}}
self.assertEqual(expected, ret)
- mock_iwdi.assert_called_once_with(self.context, node.instance_info)
+ mock_iwdi.assert_called_once_with(self.context, expected_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
@@ -3435,11 +3439,12 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
network_interface='noop')
reason = 'fake reason'
mock_val.side_effect = exception.InvalidParameterValue(reason)
+ expected_info = dict(node.instance_info, image_type='partition')
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
- mock_iwdi.assert_called_once_with(self.context, node.instance_info)
+ mock_iwdi.assert_called_once_with(self.context, expected_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
@@ -3447,6 +3452,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
self, mock_iwdi, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
mock_val.side_effect = Exception('boom')
+ expected_info = dict(node.instance_info, image_type='whole-disk')
ret = self.service.validate_driver_interfaces(self.context,
node.uuid)
reason = ('Unexpected exception, traceback saved '
@@ -3454,8 +3460,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
'that is running on test-host: boom')
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
-
- mock_iwdi.assert_called_once_with(self.context, node.instance_info)
+ mock_iwdi.assert_called_once_with(self.context, expected_info)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_instance_traits(
@@ -3463,6 +3468,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
+ expected_info = dict(node.instance_info, image_type='partition')
with mock.patch(
'ironic.conductor.utils.validate_instance_info_traits',
autospec=True) as ii_traits:
@@ -3472,7 +3478,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
- mock_iwdi.assert_called_once_with(self.context, node.instance_info)
+ mock_iwdi.assert_called_once_with(self.context, expected_info)
@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_deploy_templates(
@@ -3480,6 +3486,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
+ expected_info = dict(node.instance_info, image_type='partition')
with mock.patch(
'ironic.conductor.steps'
'.validate_user_deploy_steps_and_templates',
@@ -3490,7 +3497,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
node.uuid)
self.assertFalse(ret['deploy']['result'])
self.assertEqual(reason, ret['deploy']['reason'])
- mock_iwdi.assert_called_once_with(self.context, node.instance_info)
+ mock_iwdi.assert_called_once_with(self.context, expected_info)
@mock.patch.object(manager.ConductorManager, '_fail_if_in_state',
autospec=True)
@@ -4985,6 +4992,15 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
self.assertFalse(node_power_action.called)
self.assertFalse(self.task.upgrade_lock.called)
+ def test_state_unchanged_for_fake_node(self, node_power_action):
+ self._do_sync_power_state(None, None)
+
+ self.power.validate.assert_called_once_with(self.task)
+ self.power.get_power_state.assert_called_once_with(self.task)
+ self.assertIsNone(self.node.power_state)
+ self.assertFalse(node_power_action.called)
+ self.assertFalse(self.task.upgrade_lock.called)
+
@mock.patch.object(nova, 'power_update', autospec=True)
def test_state_not_set(self, mock_power_update, node_power_action):
self._do_sync_power_state(None, states.POWER_ON)
@@ -8181,7 +8197,7 @@ class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
"abort")
node.refresh()
self.assertTrue(mock_log.exception.called)
- self.assertIn('Failed to abort inspection.', node.last_error)
+ self.assertIn('Failed to abort inspection', node.last_error)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template
new file mode 100644
index 000000000..70f8a03f1
--- /dev/null
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template
@@ -0,0 +1,47 @@
+#!ipxe
+
+set attempts:int32 10
+set i:int32 0
+
+goto deploy
+
+:deploy
+imgfree
+kernel http://1.2.3.4:1234/deploy_kernel selinux=0 troubleshoot=0 text test_param BOOTIF=${mac} initrd=deploy_ramdisk || goto retry
+
+initrd http://1.2.3.4:1234/deploy_ramdisk || goto retry
+boot
+
+:retry
+iseq ${i} ${attempts} && goto fail ||
+inc i
+echo No response, retrying in ${i} seconds.
+sleep ${i}
+goto deploy
+
+:fail
+echo Failed to get a response after ${attempts} attempts
+echo Powering off in 30 seconds.
+sleep 30
+poweroff
+
+:boot_partition
+imgfree
+kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
+initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
+boot
+
+:boot_anaconda
+imgfree
+kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
+initrd http://1.2.3.4:1234/ramdisk || goto boot_anaconda
+boot
+
+:boot_ramdisk
+imgfree
+kernel http://1.2.3.4:1234/kernel root=/dev/ram0 text test_param ramdisk_param initrd=ramdisk || goto boot_ramdisk
+initrd http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
+boot
+
+:boot_whole_disk
+sanboot --no-describe
diff --git a/ironic/tests/unit/drivers/modules/drac/test_bios.py b/ironic/tests/unit/drivers/modules/drac/test_bios.py
index e24267f95..ab56fed0e 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_bios.py
@@ -22,6 +22,7 @@ Test class for DRAC BIOS configuration specific methods
from unittest import mock
from dracclient import exceptions as drac_exceptions
+from oslo_utils import importutils
from oslo_utils import timeutils
from ironic.common import exception
@@ -36,6 +37,8 @@ from ironic import objects
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils
+drac_constants = importutils.try_import('dracclient.constants')
+
INFO_DICT = test_utils.INFO_DICT
@@ -73,6 +76,7 @@ class DracWSManBIOSConfigurationTestCase(test_utils.BaseDracTest):
}
self.mock_client.commit_pending_bios_changes.return_value = \
"JID_5678"
+ self.mock_client.get_power_state.return_value = drac_constants.POWER_ON
@mock.patch.object(drac_common, 'parse_driver_info',
autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py
index fabfbcc97..780d2893c 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py
@@ -2286,8 +2286,12 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
@mock.patch.object(drac_raid, '_retry_till_realtime_ready', autospec=True)
def test__wait_till_realtime_ready(self, mock_ready):
+ self.node.set_driver_internal_info('cleaning_disable_ramdisk', True)
task = mock.Mock(node=self.node, context=self.context)
+ task.driver.power.get_power_state.return_value = states.POWER_OFF
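+ # The ramdisk is disabled and the node is off, so the helper has to
+ # power the node on before polling for realtime readiness.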
drac_raid._wait_till_realtime_ready(task)
+ task.driver.power.set_power_state.assert_called_once_with(
+ task, states.POWER_ON)
mock_ready.assert_called_once_with(task)
@mock.patch.object(drac_raid, 'LOG', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
index 60c66c024..e2c6e75b2 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
@@ -11,7 +11,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import shutil
+import tempfile
+from unittest import mock
+from urllib.parse import urlparse
+
+from oslo_utils import fileutils
+
from ironic.common import exception
+from ironic.common import image_service
+from ironic.common import swift
+from ironic.conf import CONF
from ironic.drivers.modules.redfish import firmware_utils
from ironic.tests import base
@@ -22,10 +33,12 @@ class FirmwareUtilsTestCase(base.TestCase):
firmware_images = [
{
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
"wait": 300
},
{
- "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE"
+ "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE",
+ "checksum": "9f6227549221920e312fed2cfc6586ee832cc546"
}
]
firmware_utils.validate_update_firmware_args(firmware_images)
@@ -33,6 +46,7 @@ class FirmwareUtilsTestCase(base.TestCase):
def test_validate_update_firmware_args_not_list(self):
firmware_images = {
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
"wait": 300
}
self.assertRaisesRegex(
@@ -43,10 +57,12 @@ class FirmwareUtilsTestCase(base.TestCase):
firmware_images = [
{
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
"wait": 300,
},
{
"url": "https://192.0.2.10/NIC_19.0.12_A00.EXE",
+ "checksum": "9f6227549221920e312fed2cfc6586ee832cc546",
"something": "unknown"
}
]
@@ -58,9 +74,11 @@ class FirmwareUtilsTestCase(base.TestCase):
firmware_images = [
{
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
"wait": 300,
},
{
+ "checksum": "9f6227549221920e312fed2cfc6586ee832cc546",
"wait": 300
}
]
@@ -72,6 +90,34 @@ class FirmwareUtilsTestCase(base.TestCase):
def test_validate_update_firmware_args_url_not_string(self):
firmware_images = [{
"url": 123,
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
+ "wait": 300
+ }]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "123 is not of type 'string'",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_checksum_missing(self):
+ firmware_images = [
+ {
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
+ "wait": 300,
+ },
+ {
+ "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE",
+ "wait": 300
+ }
+ ]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue,
+ "'checksum' is a required property",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_checksum_not_string(self):
+ firmware_images = [{
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": 123,
"wait": 300
}]
self.assertRaisesRegex(
@@ -81,8 +127,335 @@ class FirmwareUtilsTestCase(base.TestCase):
def test_validate_update_firmware_args_wait_not_int(self):
firmware_images = [{
"url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
"wait": 'abc'
}]
self.assertRaisesRegex(
exception.InvalidParameterValue, "'abc' is not of type 'integer'",
firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_source_not_known(self):
+ firmware_images = [{
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "checksum": "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d",
+ "source": "abc"
+ }]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "'abc' is not one of",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_get_swift_temp_url(self, mock_swift_api):
+ mock_swift_api.return_value.get_temp_url.return_value = 'http://temp'
+ parsed_url = urlparse("swift://firmware/sub/bios.exe")
+
+ result = firmware_utils.get_swift_temp_url(parsed_url)
+
+ self.assertEqual(result, 'http://temp')
+ mock_swift_api.return_value.get_temp_url.assert_called_with(
+ 'firmware', 'sub/bios.exe',
+ CONF.redfish.swift_object_expiry_timeout)
+
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', autospec=True)
+ def test_download_to_temp_http(
+ self, mock_http_image_service, mock_makedirs, mock_gettempdir):
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+ mock_gettempdir.return_value = '/tmp'
+ http_url = 'http://example.com/bios.exe'
+
+ with mock.patch.object(firmware_utils, 'open', mock.mock_open(),
+ create=True) as mock_open:
+ result = firmware_utils.download_to_temp(node, http_url)
+
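+ # The image should be downloaded into a per-node directory under the
+ # system temp dir.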
+ exp_result = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435/bios.exe'
+ exp_temp_dir = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435'
+ mock_makedirs.assert_called_with(exp_temp_dir, exist_ok=True)
+ self.assertEqual(result, exp_result)
+ mock_http_image_service.return_value.download.assert_called_with(
+ http_url, mock_open.return_value)
+ mock_open.assert_has_calls([mock.call(exp_result, 'wb')])
+
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', autospec=True)
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_download_to_temp_swift(
+ self, mock_swift_api, mock_http_image_service, mock_makedirs,
+ mock_gettempdir):
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+ mock_gettempdir.return_value = '/tmp'
+ swift_url = 'swift://firmware/sub/bios.exe'
+ temp_swift_url = 'http://swift_temp'
+ mock_swift_api.return_value.get_temp_url.return_value = temp_swift_url
+
+ with mock.patch.object(firmware_utils, 'open', mock.mock_open(),
+ create=True) as mock_open:
+ result = firmware_utils.download_to_temp(node, swift_url)
+
+ exp_result = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435/bios.exe'
+ exp_temp_dir = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435'
+ mock_makedirs.assert_called_with(exp_temp_dir, exist_ok=True)
+ self.assertEqual(result, exp_result)
+ mock_http_image_service.return_value.download.assert_called_with(
+ temp_swift_url, mock_open.return_value)
+ mock_open.assert_has_calls([mock.call(exp_result, 'wb')])
+
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(image_service, 'FileImageService', autospec=True)
+ def test_download_to_temp_file(
+ self, mock_file_image_service, mock_makedirs,
+ mock_gettempdir):
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+ mock_gettempdir.return_value = '/tmp'
+ file_url = 'file:///firmware/bios.exe'
+
+ with mock.patch.object(firmware_utils, 'open', mock.mock_open(),
+ create=True) as mock_open:
+ result = firmware_utils.download_to_temp(node, file_url)
+
+ exp_result = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435/bios.exe'
+ exp_temp_dir = '/tmp/9f0f6795-f74e-4b5a-850e-72f586a92435'
+ mock_makedirs.assert_called_with(exp_temp_dir, exist_ok=True)
+ self.assertEqual(result, exp_result)
+ mock_file_image_service.return_value.download.assert_called_with(
+ '/firmware/bios.exe', mock_open.return_value)
+ mock_open.assert_has_calls([mock.call(exp_result, 'wb')])
+
+ def test_download_to_temp_invalid(self):
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+ self.assertRaises(
+ exception.InvalidParameterValue,
+ firmware_utils.download_to_temp, node, 'ftp://firmware/bios.exe')
+
+ @mock.patch.object(fileutils, 'compute_file_checksum', autospec=True)
+ def test_verify_checksum(self, mock_compute_file_checksum):
+ checksum = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
+ file_path = '/tmp/bios.exe'
+ mock_compute_file_checksum.return_value = checksum
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+
+ firmware_utils.verify_checksum(node, checksum, file_path)
+
+ mock_compute_file_checksum.assert_called_with(
+ file_path, algorithm='sha1')
+
+ @mock.patch.object(fileutils, 'compute_file_checksum', autospec=True)
+ def test_verify_checksum_mismatch(self, mock_compute_file_checksum):
+ checksum1 = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
+ checksum2 = '9f6227549221920e312fed2cfc6586ee832cc546'
+ file_path = '/tmp/bios.exe'
+ mock_compute_file_checksum.return_value = checksum1
+ node = mock.Mock(uuid='9f0f6795-f74e-4b5a-850e-72f586a92435')
+
+ self.assertRaises(
+ exception.RedfishError, firmware_utils.verify_checksum, node,
+ checksum2, file_path)
+ mock_compute_file_checksum.assert_called_with(
+ file_path, algorithm='sha1')
+
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ def test_stage_http(self, mock_chmod, mock_link, mock_copyfile,
+ mock_makedirs):
+ CONF.deploy.http_url = 'http://10.0.0.2'
+ CONF.deploy.external_http_url = None
+ CONF.deploy.http_root = '/httproot'
+ node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
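+ # Staging via 'http' hard-links the temp file into the conductor's
+ # HTTP root and builds the URL from [deploy]http_url.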
+
+ staged_url, need_cleanup = firmware_utils.stage(
+ node, 'http', '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+
+ self.assertEqual(staged_url,
+ 'http://10.0.0.2/firmware/'
+ '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ self.assertEqual(need_cleanup, 'http')
+ mock_makedirs.assert_called_with(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ exist_ok=True)
+ mock_link.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_chmod.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ CONF.redfish.file_permission)
+ mock_copyfile.assert_not_called()
+
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ def test_stage_http_copyfile(self, mock_chmod, mock_link, mock_copyfile,
+ mock_makedirs):
+ CONF.deploy.http_url = 'http://10.0.0.2'
+ CONF.deploy.external_http_url = None
+ CONF.deploy.http_root = '/httproot'
+ node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
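+ # Hard-linking fails (e.g. temp dir on another filesystem), so stage()
+ # should fall back to copying the file.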
+ mock_link.side_effect = OSError
+
+ staged_url, need_cleanup = firmware_utils.stage(
+ node, 'http', '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+
+ self.assertEqual(staged_url,
+ 'http://10.0.0.2/firmware/'
+ '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ self.assertEqual(need_cleanup, 'http')
+ mock_makedirs.assert_called_with(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ exist_ok=True)
+ mock_link.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_copyfile.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_chmod.assert_called_with(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ CONF.redfish.file_permission)
+
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ def test_stage_http_copyfile_fails(self, mock_chmod, mock_link,
+ mock_copyfile, mock_makedirs):
+ CONF.deploy.http_url = 'http://10.0.0.2'
+ CONF.deploy.external_http_url = None
+ CONF.deploy.http_root = '/httproot'
+ node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
+ mock_link.side_effect = OSError
+ mock_copyfile.side_effect = IOError
+
+ self.assertRaises(exception.RedfishError, firmware_utils.stage,
+ node, 'http',
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+
+ mock_makedirs.assert_called_with(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ exist_ok=True)
+ mock_link.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_copyfile.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_chmod.assert_not_called()
+
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ def test_stage_local_external(self, mock_chmod, mock_link, mock_rmtree,
+ mock_copyfile, mock_makedirs):
+ CONF.deploy.http_url = 'http://10.0.0.2'
+ CONF.deploy.external_http_url = 'http://90.0.0.9'
+ CONF.deploy.http_root = '/httproot'
+ node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
+
+ staged_url, need_cleanup = firmware_utils.stage(
+ node, 'local',
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+
+ self.assertEqual(staged_url,
+ 'http://90.0.0.9/firmware/'
+ '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ self.assertEqual(need_cleanup, 'http')
+ mock_makedirs.assert_called_with(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ exist_ok=True)
+ mock_link.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe')
+ mock_chmod.assert_called_with(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe',
+ CONF.redfish.file_permission)
+ mock_copyfile.assert_not_called()
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_stage_swift(self, mock_swift_api):
+ node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
+ mock_swift_api.return_value.get_temp_url.return_value = 'http://temp'
+ temp_file = '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe'
+
+ staged_url, need_cleanup = firmware_utils.stage(
+ node, 'swift', temp_file)
+
+ self.assertEqual(staged_url, 'http://temp')
+ self.assertEqual(need_cleanup, 'swift')
+ exp_object_name = '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe'
+ mock_swift_api.return_value.create_object.assert_called_with(
+ CONF.redfish.swift_container,
+ exp_object_name, temp_file,
+ object_headers={'X-Delete-After':
+ str(CONF.redfish.swift_object_expiry_timeout)})
+ mock_swift_api.return_value.get_temp_url.assert_called_with(
+ CONF.redfish.swift_container, exp_object_name,
+ CONF.redfish.swift_object_expiry_timeout)
+
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_cleanup(self, mock_swift_api, mock_gettempdir, mock_rmtree):
+ mock_gettempdir.return_value = '/tmp'
+ CONF.deploy.http_root = '/httproot'
+ node = mock.Mock(
+ uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ driver_internal_info={'firmware_cleanup': ['http', 'swift']})
+ object_name = '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe'
+ get_container = mock_swift_api.return_value.connection.get_container
+ get_container.return_value = (mock.Mock(), [{'name': object_name}])
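+ # cleanup() should remove both per-node directories and delete the
+ # staged Swift object.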
+
+ firmware_utils.cleanup(node)
+
+ mock_rmtree.assert_any_call(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ ignore_errors=True)
+ mock_rmtree.assert_any_call(
+ '/httproot/firmware/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ ignore_errors=True)
+ mock_swift_api.return_value.delete_object.assert_called_with(
+ CONF.redfish.swift_container, object_name)
+
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ def test_cleanup_notstaged(self, mock_gettempdir, mock_rmtree):
+ mock_gettempdir.return_value = '/tmp'
+ node = mock.Mock(
+ uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ driver_internal_info={'something': 'else'})
+
+ firmware_utils.cleanup(node)
+
+ mock_rmtree.assert_any_call(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ ignore_errors=True)
+
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(tempfile, 'gettempdir', autospec=True)
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ @mock.patch.object(firmware_utils.LOG, 'warning', autospec=True)
+ def test_cleanup_swift_fails(self, mock_warning, mock_swift_api,
+ mock_gettempdir, mock_rmtree):
+ mock_gettempdir.return_value = '/tmp'
+ node = mock.Mock(
+ uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ driver_internal_info={'firmware_cleanup': ['swift']})
+ object_name = '55cdaba0-1123-4622-8b37-bb52dd6285d3/file.exe'
+ get_container = mock_swift_api.return_value.connection.get_container
+ get_container.return_value = (mock.Mock(), [{'name': object_name}])
+ mock_swift_api.return_value.delete_object.side_effect = \
+ exception.SwiftOperationError
+
+ firmware_utils.cleanup(node)
+
+ mock_rmtree.assert_any_call(
+ '/tmp/55cdaba0-1123-4622-8b37-bb52dd6285d3',
+ ignore_errors=True)
+ mock_swift_api.return_value.delete_object.assert_called_with(
+ CONF.redfish.swift_container, object_name)
+ mock_warning.assert_called_once()
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_management.py b/ironic/tests/unit/drivers/modules/redfish/test_management.py
index b46700664..93aae5de8 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_management.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_management.py
@@ -27,8 +27,10 @@ from ironic.common import indicator_states
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
+from ironic.conf import CONF
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.redfish import boot as redfish_boot
+from ironic.drivers.modules.redfish import firmware_utils
from ironic.drivers.modules.redfish import management as redfish_mgmt
from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.tests.unit.db import base as db_base
@@ -834,22 +836,145 @@ class RedfishManagementTestCase(db_base.DbTestCase):
mock_update_service = mock.Mock()
mock_update_service.simple_update.return_value = mock_task_monitor
mock_get_update_service.return_value = mock_update_service
+ CONF.redfish.firmware_source = 'http'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.save = mock.Mock()
- task.driver.management.update_firmware(task,
- [{'url': 'test1'},
- {'url': 'test2'}])
+ task.driver.management.update_firmware(
+ task,
+ [{'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}])
+
+ mock_get_update_service.assert_called_once_with(task.node)
+ mock_update_service.simple_update.assert_called_once_with(
+ 'http://test1')
+ self.assertIsNotNone(task.node
+ .driver_internal_info['firmware_updates'])
+ self.assertEqual(
+ [{'task_monitor': '/task/123', 'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}],
+ task.node.driver_internal_info['firmware_updates'])
+ self.assertIsNone(
+ task.node.driver_internal_info.get('firmware_cleanup'))
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=True, skip_current_step=True, polling=True)
+ mock_get_async_step_return_state.assert_called_once_with(
+ task.node)
+ mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+
+ @mock.patch.object(redfish_mgmt.RedfishManagement, '_stage_firmware_file',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options',
+ spec_set=True, autospec=True)
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_update_service', autospec=True)
+ def test_update_firmware_stage(
+ self, mock_get_update_service, mock_set_async_step_flags,
+ mock_get_async_step_return_state, mock_node_power_action,
+ mock_prepare, build_mock, mock_stage):
+ build_mock.return_value = {'a': 'b'}
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.task_monitor_uri = '/task/123'
+ mock_update_service = mock.Mock()
+ mock_update_service.simple_update.return_value = mock_task_monitor
+ mock_get_update_service.return_value = mock_update_service
+ mock_stage.return_value = ('http://staged/test1', 'http')
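+ # The BMC must be given the staged URL, not the original source URL.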
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.save = mock.Mock()
+
+ task.driver.management.update_firmware(
+ task,
+ [{'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}])
mock_get_update_service.assert_called_once_with(task.node)
- mock_update_service.simple_update.assert_called_once_with('test1')
+ mock_update_service.simple_update.assert_called_once_with(
+ 'http://staged/test1')
self.assertIsNotNone(task.node
.driver_internal_info['firmware_updates'])
self.assertEqual(
- [{'task_monitor': '/task/123', 'url': 'test1'},
- {'url': 'test2'}],
+ [{'task_monitor': '/task/123', 'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}],
task.node.driver_internal_info['firmware_updates'])
+ self.assertIsNotNone(
+ task.node.driver_internal_info['firmware_cleanup'])
+ self.assertEqual(
+ ['http'], task.node.driver_internal_info['firmware_cleanup'])
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=True, skip_current_step=True, polling=True)
+ mock_get_async_step_return_state.assert_called_once_with(
+ task.node)
+ mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+
+ @mock.patch.object(redfish_mgmt.RedfishManagement, '_stage_firmware_file',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options',
+ spec_set=True, autospec=True)
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_update_service', autospec=True)
+ def test_update_firmware_stage_both(
+ self, mock_get_update_service, mock_set_async_step_flags,
+ mock_get_async_step_return_state, mock_node_power_action,
+ mock_prepare, build_mock, mock_stage):
+ build_mock.return_value = {'a': 'b'}
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.task_monitor_uri = '/task/123'
+ mock_update_service = mock.Mock()
+ mock_update_service.simple_update.return_value = mock_task_monitor
+ mock_get_update_service.return_value = mock_update_service
+ mock_stage.return_value = ('http://staged/test1', 'http')
+ info = self.node.driver_internal_info
+ info['firmware_cleanup'] = ['swift']
+ self.node.driver_internal_info = info
+ self.node.save()
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.save = mock.Mock()
+
+ task.driver.management.update_firmware(
+ task,
+ [{'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}])
+
+ mock_get_update_service.assert_called_once_with(task.node)
+ mock_update_service.simple_update.assert_called_once_with(
+ 'http://staged/test1')
+ self.assertIsNotNone(task.node
+ .driver_internal_info['firmware_updates'])
+ self.assertEqual(
+ [{'task_monitor': '/task/123', 'url': 'http://test1',
+ 'checksum': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'},
+ {'url': 'http://test2',
+ 'checksum': '9f6227549221920e312fed2cfc6586ee832cc546'}],
+ task.node.driver_internal_info['firmware_updates'])
+ self.assertIsNotNone(
+ task.node.driver_internal_info['firmware_cleanup'])
+ self.assertEqual(
+ ['swift', 'http'],
+ task.node.driver_internal_info['firmware_cleanup'])
mock_set_async_step_flags.assert_called_once_with(
task.node, reboot=True, skip_current_step=True, polling=True)
mock_get_async_step_return_state.assert_called_once_with(
@@ -1218,9 +1343,10 @@ class RedfishManagementTestCase(db_base.DbTestCase):
driver_internal_info = {
'something': 'else',
'firmware_updates': [
- {'task_monitor': '/task/123', 'url': 'test1'},
- {'url': 'test2'}]}
+ {'task_monitor': '/task/123', 'url': 'http://test1'},
+ {'url': 'http://test2'}]}
self.node.driver_internal_info = driver_internal_info
+ CONF.redfish.firmware_source = 'http'
management = redfish_mgmt.RedfishManagement()
with task_manager.acquire(self.context, self.node.uuid,
@@ -1230,19 +1356,88 @@ class RedfishManagementTestCase(db_base.DbTestCase):
management._continue_firmware_updates(
task,
mock_update_service,
- [{'task_monitor': '/task/123', 'url': 'test1'},
- {'url': 'test2'}])
+ [{'task_monitor': '/task/123', 'url': 'http://test1'},
+ {'url': 'http://test2'}])
self.assertTrue(mock_log.called)
- mock_update_service.simple_update.assert_called_once_with('test2')
+ mock_update_service.simple_update.assert_called_once_with(
+ 'http://test2')
self.assertIsNotNone(
task.node.driver_internal_info['firmware_updates'])
self.assertEqual(
- [{'url': 'test2', 'task_monitor': '/task/987'}],
+ [{'url': 'http://test2', 'task_monitor': '/task/987'}],
task.node.driver_internal_info['firmware_updates'])
task.node.save.assert_called_once_with()
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+ @mock.patch.object(firmware_utils, 'download_to_temp', autospec=True)
+ @mock.patch.object(firmware_utils, 'verify_checksum', autospec=True)
+ @mock.patch.object(firmware_utils, 'stage', autospec=True)
+ def test__stage_firmware_file_https(self, mock_stage, mock_verify_checksum,
+ mock_download_to_temp):
+ CONF.redfish.firmware_source = 'local'
+ firmware_update = {'url': 'https://test1', 'checksum': 'abc'}
+ node = mock.Mock()
+ mock_download_to_temp.return_value = '/tmp/test1'
+ mock_stage.return_value = ('http://staged/test1', 'http')
+
+ management = redfish_mgmt.RedfishManagement()
+
+ staged_url, needs_cleanup = management._stage_firmware_file(
+ node, firmware_update)
+
+ self.assertEqual(staged_url, 'http://staged/test1')
+ self.assertEqual(needs_cleanup, 'http')
+ mock_download_to_temp.assert_called_with(node, 'https://test1')
+ mock_verify_checksum.assert_called_with(node, 'abc', '/tmp/test1')
+ mock_stage.assert_called_with(node, 'local', '/tmp/test1')
+
+ @mock.patch.object(firmware_utils, 'download_to_temp', autospec=True)
+ @mock.patch.object(firmware_utils, 'verify_checksum', autospec=True)
+ @mock.patch.object(firmware_utils, 'stage', autospec=True)
+ @mock.patch.object(firmware_utils, 'get_swift_temp_url', autospec=True)
+ def test__stage_firmware_file_swift(
+ self, mock_get_swift_temp_url, mock_stage, mock_verify_checksum,
+ mock_download_to_temp):
+ CONF.redfish.firmware_source = 'swift'
+ firmware_update = {'url': 'swift://container/bios.exe'}
+ node = mock.Mock()
+ mock_get_swift_temp_url.return_value = 'http://temp'
+
+ management = redfish_mgmt.RedfishManagement()
+
+ staged_url, needs_cleanup = management._stage_firmware_file(
+ node, firmware_update)
+
+ self.assertEqual(staged_url, 'http://temp')
+ self.assertIsNone(needs_cleanup)
+ mock_download_to_temp.assert_not_called()
+ mock_verify_checksum.assert_not_called()
+ mock_stage.assert_not_called()
+
+ @mock.patch.object(firmware_utils, 'cleanup', autospec=True)
+ @mock.patch.object(firmware_utils, 'download_to_temp', autospec=True)
+ @mock.patch.object(firmware_utils, 'verify_checksum', autospec=True)
+ @mock.patch.object(firmware_utils, 'stage', autospec=True)
+ def test__stage_firmware_file_error(self, mock_stage, mock_verify_checksum,
+ mock_download_to_temp, mock_cleanup):
+ CONF.redfish.firmware_source = 'local'
+ firmware_update = {'url': 'https://test1'}
+ node = mock.Mock()
+ mock_download_to_temp.return_value = '/tmp/test1'
+ mock_stage.side_effect = exception.IronicException
+
+ management = redfish_mgmt.RedfishManagement()
+ self.assertRaises(exception.IronicException,
+ management._stage_firmware_file, node,
+ firmware_update)
+ mock_download_to_temp.assert_called_with(node, 'https://test1')
+ mock_verify_checksum.assert_called_with(node, None, '/tmp/test1')
+ mock_stage.assert_called_with(node, 'local', '/tmp/test1')
+ mock_cleanup.assert_called_with(node)
+
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_get_secure_boot_state(self, mock_get_system):
fake_system = mock_get_system.return_value
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
index db2d825e3..ef3bba45e 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -76,7 +76,7 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
)
self.node = obj_utils.create_test_node(
self.context, driver='redfish', driver_info=INFO_DICT)
- self.mock_storage = mock.MagicMock()
+ self.mock_storage = mock.MagicMock(identity='RAID controller 1')
self.drive_id1 = '35D38F11ACEF7BD3'
self.drive_id2 = '3F5A8C54207B7233'
self.drive_id3 = '32ADF365C6C1B7BD'
@@ -422,6 +422,9 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ resource = mock.MagicMock(spec=['resource_name'])
+ resource.resource_name = 'volume'
+ self.mock_storage.volumes.create.return_value = resource
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -471,6 +474,88 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
@mock.patch.object(deploy_utils, 'get_async_step_return_state',
autospec=True)
@mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_2_on_reset(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'is_root_volume': True,
+ 'disk_type': 'ssd'
+ },
+ {
+ 'size_gb': 500,
+ 'raid_level': '1',
+ 'disk_type': 'hdd'
+ }
+ ]
+ }
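+ # Simulate a controller that can only apply volume changes on reset:
+ # only one volume is submitted now and the other is recorded as
+ # pending.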
+ volumes = mock.MagicMock()
+ op_apply_time_support = mock.MagicMock()
+ op_apply_time_support.mapped_supported_values = [
+ sushy.APPLY_TIME_ON_RESET]
+ volumes.operation_apply_time_support = op_apply_time_support
+ self.mock_storage.volumes = volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ task_mon = mock.MagicMock()
+ task_mon.task_monitor_uri = '/TaskService/123'
+ volumes.create.return_value = task_mon
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'Mirrored',
+ 'RAIDType': 'RAID1',
+ 'CapacityBytes': 536870912000,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2}
+ ]
+ }
+ }
+ expected_raid_configs = {
+ 'operation': 'create',
+ 'pending': {'RAID controller 1': [
+ {'controller': 'RAID controller 1',
+ 'disk_type': 'ssd',
+ 'is_root_volume': True,
+ 'physical_disks': [self.drive_id5,
+ self.drive_id6,
+ self.drive_id7],
+ 'raid_level': '5',
+ 'size_bytes': 107374182400,
+ 'span_depth': 1,
+ 'span_length': 3.0}]},
+ 'task_monitor_uri': ['/TaskService/123']}
+ self.assertEqual(
+ self.mock_storage.volumes.create.call_count, 1)
+ self.mock_storage.volumes.create.assert_called_with(
+ expected_payload, apply_time=sushy.APPLY_TIME_ON_RESET)
+ self.assertEqual(
+ expected_raid_configs,
+ task.node.driver_internal_info.get('raid_configs'))
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
def test_create_config_case_3(
self,
mock_set_async_step_flags,
@@ -574,6 +659,9 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ resource = mock.MagicMock(spec=['resource_name'])
+ resource.resource_name = 'volume'
+ self.mock_storage.volumes.create.return_value = resource
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -686,6 +774,9 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ resource = mock.MagicMock(spec=['resource_name'])
+ resource.resource_name = 'volume'
+ self.mock_storage.volumes.create.return_value = resource
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -797,6 +888,9 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ resource = mock.MagicMock(spec=['resource_name'])
+ resource.resource_name = 'volume'
+ self.mock_storage.volumes.create.return_value = resource
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -913,7 +1007,7 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
shared=True) as task:
task.driver.raid.delete_configuration(task)
self.assertEqual(mock_volumes[0].delete.call_count, 1)
- self.assertEqual(mock_volumes[1].delete.call_count, 1)
+ self.assertEqual(mock_volumes[1].delete.call_count, 0)
mock_set_async_step_flags.assert_called_once_with(
task.node, reboot=True, skip_current_step=True, polling=True)
mock_get_async_step_return_state.assert_called_once_with(
@@ -921,6 +1015,11 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
mock_build_agent_options.assert_called_once_with(task.node)
self.assertEqual(mock_prepare_ramdisk.call_count, 1)
+ self.assertEqual(
+ {'operation': 'delete',
+ 'pending': True,
+ 'task_monitor_uri': ['/TaskService/123']},
+ task.node.driver_internal_info.get('raid_configs'))
def test_volume_create_error_handler(self, mock_get_system):
volume_collection = self.mock_storage.volumes
@@ -956,22 +1055,6 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
task, sushy_error, volume_collection, expected_payload
)
- def test_validate(self, mock_get_system):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node.properties['vendor'] = "Supported vendor"
-
- task.driver.raid.validate(task)
-
- def test_validate_unsupported_vendor(self, mock_get_system):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node.properties['vendor'] = "Dell Inc."
-
- self.assertRaisesRegex(exception.InvalidParameterValue,
- "with vendor Dell.Inc.",
- task.driver.raid.validate, task)
-
def test_validate_raid_config(self, mock_get_system):
raid_config = {
'logical_disks': [
@@ -1058,3 +1141,228 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(storage, self.mock_storage)
nonraid_storage.drives.assert_not_called()
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ @mock.patch.object(redfish_raid.LOG, 'info', autospec=True)
+ def test__raid_config_in_progress_success(
+ self, mock_info, mock_get_task_monitor, mock_get_system):
+ mock_task = mock.Mock()
+ mock_task.task_state = sushy.TASK_STATE_COMPLETED
+ mock_task.task_status = sushy.HEALTH_OK
+ mock_task.messages = []
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.get_task.return_value = mock_task
+ mock_get_task_monitor.return_value = mock_task_monitor
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+
+ raid = redfish_raid.RedfishRAID()
+ result = raid._raid_config_in_progress(
+ task, '/TaskService/123', 'create')
+ self.assertEqual(False, result)
+ mock_info.assert_called_once()
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ @mock.patch.object(redfish_raid.LOG, 'info', autospec=True)
+ def test__raid_config_in_progress_task_mon_error(
+ self, mock_info, mock_get_task_monitor, mock_get_system):
+ mock_get_task_monitor.side_effect = exception.RedfishError(
+ error='Task not found')
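+ # A task monitor that has vanished is treated as the operation having
+ # finished rather than as a failure.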
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+
+ raid = redfish_raid.RedfishRAID()
+ result = raid._raid_config_in_progress(
+ task, '/TaskService/123', 'create')
+ self.assertEqual(False, result)
+ mock_info.assert_called_once()
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ @mock.patch.object(redfish_raid.LOG, 'debug', autospec=True)
+ def test__raid_config_in_progress_still_processing(
+ self, mock_debug, mock_get_task_monitor, mock_get_system):
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.is_processing = True
+ mock_get_task_monitor.return_value = mock_task_monitor
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+
+ raid = redfish_raid.RedfishRAID()
+ result = raid._raid_config_in_progress(
+ task, '/TaskService/123', 'create')
+ self.assertEqual(True, result)
+ mock_debug.assert_called_once()
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ @mock.patch.object(redfish_raid.LOG, 'error', autospec=True)
+ def test__raid_config_in_progress_failed(
+ self, mock_error, mock_get_task_monitor, mock_get_system):
+ mock_message = mock.Mock()
+ mock_message.message = 'RAID configuration failed'
+ mock_message.severity = sushy.SEVERITY_CRITICAL
+ mock_task = mock.Mock()
+ mock_task.task_state = sushy.TASK_STATE_COMPLETED
+ mock_task.task_status = sushy.HEALTH_CRITICAL
+ mock_task.messages = [mock_message]
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.get_task.return_value = mock_task
+ mock_get_task_monitor.return_value = mock_task_monitor
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+
+ raid = redfish_raid.RedfishRAID()
+ result = raid._raid_config_in_progress(
+ task, '/TaskService/123', 'create')
+ self.assertEqual(False, result)
+ mock_error.assert_called_once()
+
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
+ @mock.patch.object(redfish_utils, 'get_task_monitor',
+ autospec=True)
+ def test__check_node_raid_config_deploy(
+ self, mock_get_task_monitor, mock_resume_deploy,
+ mock_resume_clean, mock_get_system):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.deploy_step = {'priority': 100, 'interface': 'raid',
+ 'step': 'delete_configuration',
+ 'argsinfo': {}}
+ info = task.node.driver_internal_info
+ info['raid_configs'] = {'operation': 'delete', 'pending': {},
+ 'task_monitor_uri': ['/TaskService/123']}
+ task.node.driver_internal_info = info
+ task.node.save()
+
+ mock_task_monitor = mock_get_task_monitor.return_value
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.response.status_code = 200
+
+ raid = redfish_raid.RedfishRAID()
+ raid._check_node_raid_config(task)
+
+ mock_resume_deploy.assert_called_with(task)
+ mock_resume_clean.assert_not_called()
+
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
+ @mock.patch.object(redfish_utils, 'get_task_monitor',
+ autospec=True)
+ def test__check_node_raid_config_clean(
+ self, mock_get_task_monitor, mock_resume_deploy,
+ mock_resume_clean, mock_get_system):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.clean_step = {'interface': 'raid',
+ 'step': 'delete_configuration',
+ 'argsinfo': {}}
+ info = task.node.driver_internal_info
+ info['raid_configs'] = {'operation': 'delete', 'pending': {},
+ 'task_monitor_uri': ['/TaskService/123']}
+ task.node.driver_internal_info = info
+ task.node.save()
+
+ mock_task_monitor = mock_get_task_monitor.return_value
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.response.status_code = 200
+
+ raid = redfish_raid.RedfishRAID()
+ raid._check_node_raid_config(task)
+
+ mock_resume_deploy.assert_not_called()
+ mock_resume_clean.assert_called_with(task)
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor',
+ autospec=True)
+ @mock.patch.object(redfish_raid.RedfishRAID,
+ '_submit_create_configuration', autospec=True)
+ @mock.patch.object(redfish_raid.RedfishRAID,
+ '_submit_delete_configuration', autospec=True)
+ @mock.patch.object(deploy_utils, 'reboot_to_finish_step', autospec=True)
+ def test__check_node_raid_config_pending_create(
+ self, mock_reboot, mock_submit_delete, mock_submit_create,
+ mock_get_task_monitor, mock_get_system):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.clean_step = {'interface': 'raid',
+ 'step': 'create_configuration',
+ 'argsinfo': {}}
+ info = task.node.driver_internal_info
+ raid_configs = {
+ 'operation': 'create',
+ 'pending': {'RAID controller 1': [
+ {'controller': 'RAID controller 1',
+ 'disk_type': 'ssd',
+ 'is_root_volume': True,
+ 'physical_disks': [self.drive_id5,
+ self.drive_id6,
+ self.drive_id7],
+ 'raid_level': '5',
+ 'size_bytes': 107374182400,
+ 'span_depth': 1,
+ 'span_length': 3.0}]},
+ 'task_monitor_uri': ['/TaskService/123']}
+ info['raid_configs'] = raid_configs
+ task.node.driver_internal_info = info
+ task.node.save()
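+ # With the previous task finished, the pending create request should
+ # be submitted and the node rebooted to finish the step.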
+
+ mock_task_monitor = mock_get_task_monitor.return_value
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.response.status_code = 200
+
+ mock_submit_create.return_value = ({}, True)
+
+ raid = redfish_raid.RedfishRAID()
+ raid._check_node_raid_config(task)
+
+ mock_submit_create.assert_called_with(
+ raid, task, raid_configs['pending'])
+ mock_submit_delete.assert_not_called()
+ mock_reboot.assert_called_with(task)
+
+ @mock.patch.object(redfish_utils, 'get_task_monitor',
+ autospec=True)
+ @mock.patch.object(redfish_raid.RedfishRAID,
+ '_submit_create_configuration', autospec=True)
+ @mock.patch.object(redfish_raid.RedfishRAID,
+ '_submit_delete_configuration', autospec=True)
+ @mock.patch.object(deploy_utils, 'reboot_to_finish_step', autospec=True)
+ def test__check_node_raid_config_pending_delete(
+ self, mock_reboot, mock_submit_delete, mock_submit_create,
+ mock_get_task_monitor, mock_get_system):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.clean_step = {'interface': 'raid',
+ 'step': 'delete_configuration',
+ 'argsinfo': {}}
+ info = task.node.driver_internal_info
+ raid_configs = {
+ 'operation': 'delete',
+ 'pending': True,
+ 'task_monitor_uri': ['/TaskService/123']}
+ info['raid_configs'] = raid_configs
+ task.node.driver_internal_info = info
+ task.node.save()
+
+ mock_task_monitor = mock_get_task_monitor.return_value
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.response.status_code = 200
+
+ mock_submit_delete.return_value = ({}, False)
+
+ raid = redfish_raid.RedfishRAID()
+ raid._check_node_raid_config(task)
+
+ mock_submit_create.assert_not_called()
+ mock_submit_delete.assert_called_with(raid, task)
+ mock_reboot.assert_not_called()
diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py
index 586bdc07f..4b6d271d4 100644
--- a/ironic/tests/unit/drivers/modules/test_agent.py
+++ b/ironic/tests/unit/drivers/modules/test_agent.py
@@ -692,6 +692,15 @@ class TestAgentDeploy(CommonTestsMixin, db_base.DbTestCase):
task.driver.boot, task)
validate_http_mock.assert_called_once_with(task.node)
+ def test_validate_invalid_image_type(self):
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ task.node.instance_info['image_source'] = 'http://image-ref'
+ task.node.instance_info['image_type'] = 'passport photo'
+ self.assertRaisesRegex(exception.InvalidParameterValue,
+ 'passport photo',
+ self.driver.validate, task)
+
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/test_agent_base.py b/ironic/tests/unit/drivers/modules/test_agent_base.py
index 7639c83f9..97daca79f 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_base.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_base.py
@@ -278,8 +278,8 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
task, mock.ANY, collect_logs=True)
log_mock.assert_called_once_with(
'Asynchronous exception for node %(node)s: %(err)s',
- {'err': 'Failed to process the next deploy step. '
- 'Error: LlamaException',
+ {'err': 'Failed to process the next deploy step: '
+ 'LlamaException',
'node': task.node.uuid})
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@@ -307,8 +307,8 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
self.assertFalse(failed_mock.called)
log_mock.assert_called_once_with(
'Asynchronous exception for node %(node)s: %(err)s',
- {'err': 'Failed to process the next deploy step. '
- 'Error: LlamaException',
+ {'err': 'Failed to process the next deploy step: '
+ 'LlamaException',
'node': task.node.uuid})
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@@ -460,8 +460,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
mock_finalize.assert_called_once_with(mock.ANY, task)
mock_rescue_err_handler.assert_called_once_with(
- task, 'Node failed to perform '
- 'rescue operation. Error: some failure')
+ task, 'Node failed to perform rescue operation: some failure')
@mock.patch.object(agent_base.LOG, 'error', autospec=True)
def test_heartbeat_records_cleaning_deploying(self, log_mock):
@@ -827,7 +826,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
'deployment do not support the command "sync"')
log_mock.assert_called_once_with(
'Failed to flush the file system prior to hard rebooting the '
- 'node %(node)s. Error: %(error)s',
+ 'node %(node)s: %(error)s',
{'node': task.node.uuid, 'error': log_error})
self.assertFalse(mock_collect.called)
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index bfa662fa8..e85ab631d 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -1362,8 +1362,15 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
self.task = mock.Mock(context=self.context, node=self.node,
spec=['context', 'node'])
+ def test_validate_image_properties_local_boot(self):
+ inst_info = utils.get_image_instance_info(self.node)
+ utils.validate_image_properties(self.task, inst_info)
+
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
- def test_validate_image_properties_glance_image(self, image_service_mock):
+ def test_validate_image_properties_glance_image(self, image_service_mock,
+ boot_options_mock):
inst_info = utils.get_image_instance_info(self.node)
image_service_mock.return_value.show.return_value = {
'properties': {'kernel_id': '1111', 'ramdisk_id': '2222'},
@@ -1374,9 +1381,11 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
self.node.instance_info['image_source'], context=self.context
)
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_missing_prop(
- self, image_service_mock):
+ self, image_service_mock, boot_options_mock):
inst_info = utils.get_image_instance_info(self.node)
image_service_mock.return_value.show.return_value = {
'properties': {'kernel_id': '1111'},
@@ -1406,9 +1415,11 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
self.node.instance_info['image_source'], context=self.context
)
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_not_authorized(
- self, image_service_mock):
+ self, image_service_mock, boot_options_mock):
inst_info = {'image_source': 'uuid'}
show_mock = image_service_mock.return_value.show
show_mock.side_effect = exception.ImageNotAuthorized(image_id='uuid')
@@ -1416,9 +1427,11 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
utils.validate_image_properties, self.task,
inst_info)
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_not_found(
- self, image_service_mock):
+ self, image_service_mock, boot_options_mock):
inst_info = {'image_source': 'uuid'}
show_mock = image_service_mock.return_value.show
show_mock.side_effect = exception.ImageNotFound(image_id='uuid')
@@ -1432,41 +1445,19 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
utils.validate_image_properties, self.task,
inst_info)
- @mock.patch.object(image_service.HttpImageService, 'show', autospec=True)
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
def test_validate_image_properties_nonglance_image(
- self, image_service_show_mock):
+ self, boot_options_mock):
instance_info = {
'image_source': 'http://ubuntu',
'kernel': 'kernel_uuid',
'ramdisk': 'file://initrd',
'root_gb': 100,
}
- image_service_show_mock.return_value = {'size': 1, 'properties': {}}
self.node.instance_info = instance_info
inst_info = utils.get_image_instance_info(self.node)
utils.validate_image_properties(self.task, inst_info)
- image_service_show_mock.assert_called_once_with(
- mock.ANY, instance_info['image_source'])
-
- @mock.patch.object(image_service.HttpImageService, 'show', autospec=True)
- def test_validate_image_properties_nonglance_image_validation_fail(
- self, img_service_show_mock):
- instance_info = {
- 'image_source': 'http://ubuntu',
- 'kernel': 'kernel_uuid',
- 'ramdisk': 'file://initrd',
- 'root_gb': 100,
- }
- img_service_show_mock.side_effect = exception.ImageRefValidationFailed(
- image_href='http://ubuntu', reason='HTTPError')
- self.node.instance_info = instance_info
- inst_info = utils.get_image_instance_info(self.node)
- expected_error = ('Validation of image href http://ubuntu '
- 'failed, reason: HTTPError')
- error = self.assertRaises(exception.InvalidParameterValue,
- utils.validate_image_properties,
- self.task, inst_info)
- self.assertEqual(expected_error, str(error))
def test_validate_image_properties_boot_iso_conflict(self):
instance_info = {
@@ -1521,6 +1512,19 @@ class ValidateParametersTestCase(db_base.DbTestCase):
def test__get_img_instance_info_good_non_glance_image(self):
instance_info = INST_INFO_DICT.copy()
instance_info['image_source'] = 'http://image'
+
+ info = self._test__get_img_instance_info(instance_info=instance_info)
+
+ self.assertIsNotNone(info['image_source'])
+ self.assertNotIn('kernel', info)
+ self.assertNotIn('ramdisk', info)
+
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
+ def test__get_img_instance_info_good_non_glance_image_netboot(
+ self, mock_boot_opt):
+ instance_info = INST_INFO_DICT.copy()
+ instance_info['image_source'] = 'http://image'
instance_info['kernel'] = 'http://kernel'
instance_info['ramdisk'] = 'http://ramdisk'
@@ -1530,7 +1534,10 @@ class ValidateParametersTestCase(db_base.DbTestCase):
self.assertIsNotNone(info['ramdisk'])
self.assertIsNotNone(info['kernel'])
- def test__get_img_instance_info_non_glance_image_missing_kernel(self):
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
+ def test__get_img_instance_info_non_glance_image_missing_kernel(
+ self, mock_boot_opt):
instance_info = INST_INFO_DICT.copy()
instance_info['image_source'] = 'http://image'
instance_info['ramdisk'] = 'http://ramdisk'
@@ -1540,7 +1547,10 @@ class ValidateParametersTestCase(db_base.DbTestCase):
self._test__get_img_instance_info,
instance_info=instance_info)
- def test__get_img_instance_info_non_glance_image_missing_ramdisk(self):
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='netboot')
+ def test__get_img_instance_info_non_glance_image_missing_ramdisk(
+ self, mock_boot_opt):
instance_info = INST_INFO_DICT.copy()
instance_info['image_source'] = 'http://image'
instance_info['kernel'] = 'http://kernel'
@@ -1799,8 +1809,18 @@ class InstanceInfoTestCase(db_base.DbTestCase):
def test_parse_instance_info_nonglance_image(self):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
+ node = obj_utils.create_test_node(
+ self.context, instance_info=info,
+ driver_internal_info=DRV_INTERNAL_INFO_DICT,
+ )
+ utils.parse_instance_info(node)
+
+ def test_parse_instance_info_nonglance_image_netboot(self):
+ info = INST_INFO_DICT.copy()
+ info['image_source'] = 'file:///image.qcow2'
info['kernel'] = 'file:///image.vmlinuz'
info['ramdisk'] = 'file:///image.initrd'
+ info['capabilities'] = {'boot_option': 'netboot'}
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
@@ -1811,6 +1831,7 @@ class InstanceInfoTestCase(db_base.DbTestCase):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
info['ramdisk'] = 'file:///image.initrd'
+ info['capabilities'] = {'boot_option': 'netboot'}
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
@@ -1898,6 +1919,70 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
self, glance_mock, parse_instance_info_mock, validate_mock):
i_info = {}
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
+ i_info['image_type'] = 'partition'
+ i_info['root_gb'] = 5
+ i_info['swap_mb'] = 4
+ i_info['ephemeral_gb'] = 0
+ i_info['ephemeral_format'] = None
+ i_info['configdrive'] = 'configdrive'
+ driver_internal_info = self.node.driver_internal_info
+ driver_internal_info['is_whole_disk_image'] = False
+ self.node.driver_internal_info = driver_internal_info
+ self.node.instance_info = i_info
+ self.node.save()
+
+ image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
+ 'os_hash_algo': 'sha512', 'os_hash_value': 'fake-sha512',
+ 'container_format': 'bare',
+ 'properties': {'kernel_id': 'kernel',
+ 'ramdisk_id': 'ramdisk'}}
+ glance_mock.return_value.show = mock.MagicMock(spec_set=[],
+ return_value=image_info)
+ glance_obj_mock = glance_mock.return_value
+ glance_obj_mock.swift_temp_url.return_value = 'http://temp-url'
+ parse_instance_info_mock.return_value = {'swap_mb': 4}
+ image_source = '733d1c44-a2ea-414b-aca7-69decf20d810'
+ expected_i_info = {'root_gb': 5,
+ 'swap_mb': 4,
+ 'ephemeral_gb': 0,
+ 'ephemeral_format': None,
+ 'configdrive': 'configdrive',
+ 'image_source': image_source,
+ 'image_url': 'http://temp-url',
+ 'image_type': 'partition',
+ 'image_tags': [],
+ 'image_properties': {'kernel_id': 'kernel',
+ 'ramdisk_id': 'ramdisk'},
+ 'image_checksum': 'aa',
+ 'image_os_hash_algo': 'sha512',
+ 'image_os_hash_value': 'fake-sha512',
+ 'image_container_format': 'bare',
+ 'image_disk_format': 'qcow2'}
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+
+ info = utils.build_instance_info_for_deploy(task)
+
+ glance_mock.assert_called_once_with(context=task.context)
+ glance_mock.return_value.show.assert_called_once_with(
+ self.node.instance_info['image_source'])
+ glance_mock.return_value.swift_temp_url.assert_called_once_with(
+ image_info)
+ validate_mock.assert_called_once_with(
+ mock.ANY, 'http://temp-url', secret=True)
+ image_type = task.node.instance_info['image_type']
+ self.assertEqual('partition', image_type)
+ self.assertEqual(expected_i_info, info)
+ parse_instance_info_mock.assert_called_once_with(task.node)
+
+ @mock.patch.object(image_service.HttpImageService, 'validate_href',
+ autospec=True)
+ @mock.patch.object(utils, 'parse_instance_info', autospec=True)
+ @mock.patch.object(image_service, 'GlanceImageService', autospec=True)
+ def test_build_instance_info_for_deploy_glance_partition_image_netboot(
+ self, glance_mock, parse_instance_info_mock, validate_mock):
+ i_info = {}
+ i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
i_info['kernel'] = '13ce5a56-1de3-4916-b8b2-be778645d003'
i_info['ramdisk'] = 'a5a370a8-1b39-433f-be63-2c7d708e4b4e'
i_info['root_gb'] = 5
@@ -1905,6 +1990,7 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
i_info['ephemeral_gb'] = 0
i_info['ephemeral_format'] = None
i_info['configdrive'] = 'configdrive'
+ i_info['capabilities'] = {'boot_option': 'netboot'}
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
@@ -1922,7 +2008,8 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
glance_obj_mock.swift_temp_url.return_value = 'http://temp-url'
parse_instance_info_mock.return_value = {'swap_mb': 4}
image_source = '733d1c44-a2ea-414b-aca7-69decf20d810'
- expected_i_info = {'root_gb': 5,
+ expected_i_info = {'capabilities': {'boot_option': 'netboot'},
+ 'root_gb': 5,
'swap_mb': 4,
'ephemeral_gb': 0,
'ephemeral_format': None,
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py
index 3356ce813..09d70eba9 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/test_inspector.py
@@ -13,6 +13,7 @@
from unittest import mock
import eventlet
+from keystoneauth1 import exceptions as ks_exception
import openstack
from ironic.common import context
@@ -61,6 +62,17 @@ class GetClientTestCase(db_base.DbTestCase):
self.assertEqual(1, mock_auth.call_count)
self.assertEqual(1, mock_session.call_count)
+ def test__get_client_connection_problem(
+ self, mock_conn, mock_session, mock_auth):
+ mock_conn.side_effect = ks_exception.DiscoveryFailure("")
+ self.assertRaises(exception.ConfigInvalid,
+ inspector._get_client, self.context)
+ mock_conn.assert_called_once_with(
+ session=mock.sentinel.session,
+ oslo_conf=mock.ANY)
+ self.assertEqual(1, mock_auth.call_count)
+ self.assertEqual(1, mock_session.call_count)
+
class BaseTestCase(db_base.DbTestCase):
def setUp(self):
diff --git a/ironic/tests/unit/drivers/modules/test_ipxe.py b/ironic/tests/unit/drivers/modules/test_ipxe.py
index 085c96c41..294133a4c 100644
--- a/ironic/tests/unit/drivers/modules/test_ipxe.py
+++ b/ironic/tests/unit/drivers/modules/test_ipxe.py
@@ -136,7 +136,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
autospec=True)
def test_validate_with_boot_iso(self, mock_boot_option, mock_glance):
self.node.instance_info = {
- 'boot_iso': "http://localhost:1234/boot.iso"
+ 'boot_iso': "glance://image"
}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
@@ -1091,6 +1091,64 @@ class iPXEBootTestCase(db_base.DbTestCase):
boot_devices.PXE,
persistent=True)
+ @mock.patch('os.path.isfile', lambda filename: False)
+ @mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
+ @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
+ @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
+ @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
+ @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
+ def test_prepare_instance_netboot_ramdisk_with_kernel_arg(
+ self, get_image_info_mock, cache_mock,
+ dhcp_factory_mock, switch_pxe_config_mock,
+ set_boot_device_mock, create_pxe_config_mock):
+ http_url = 'http://192.1.2.3:1234'
+ self.config(http_url=http_url, group='deploy')
+ self.config(enabled_deploy_interfaces='ramdisk')
+ provider_mock = mock.MagicMock()
+ dhcp_factory_mock.return_value = provider_mock
+ self.node.instance_info = {'ramdisk_kernel_arguments': 'cat meow'}
+ image_info = {'kernel': ('', '/path/to/kernel'),
+ 'deploy_kernel': ('', '/path/to/kernel'),
+ 'ramdisk': ('', '/path/to/ramdisk'),
+ 'deploy_ramdisk': ('', '/path/to/ramdisk')}
+ get_image_info_mock.return_value = image_info
+ self.node.provision_state = states.DEPLOYING
+ self.node.deploy_interface = 'ramdisk'
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
+ ipxe_enabled=True)
+ dhcp_opts += pxe_utils.dhcp_options_for_instance(
+ task, ipxe_enabled=True, ip_version=6)
+ pxe_config_path = pxe_utils.get_pxe_config_file_path(
+ task.node.uuid, ipxe_enabled=True)
+ task.driver.boot.prepare_instance(task)
+ self.assertTrue(get_image_info_mock.called)
+ self.assertTrue(cache_mock.called)
+ uuid = self.node.uuid
+ expected_params = {
+ 'aki_path': 'http://192.1.2.3:1234/' + uuid + '/kernel',
+ 'ari_path': 'http://192.1.2.3:1234/' + uuid + '/ramdisk',
+ 'ramdisk_opts': 'cat meow',
+ 'pxe_append_params': 'nofb nomodeset vga=normal ipa-debug=1 '
+ 'ipa-global-request-'
+ 'id=' + task.context.request_id,
+ 'tftp_server': mock.ANY,
+ 'ipxe_timeout': 0
+ }
+ provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
+ create_pxe_config_mock.assert_called_once_with(
+ task, expected_params, CONF.pxe.ipxe_config_template,
+ ipxe_enabled=True)
+ switch_pxe_config_mock.assert_called_once_with(
+ pxe_config_path, None, boot_modes.UEFI, False,
+ ipxe_enabled=True, iscsi_boot=False, ramdisk_boot=True,
+ anaconda_boot=False)
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.PXE,
+ persistent=True)
+
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/test_pxe.py b/ironic/tests/unit/drivers/modules/test_pxe.py
index e1559b6f6..d999a8f7a 100644
--- a/ironic/tests/unit/drivers/modules/test_pxe.py
+++ b/ironic/tests/unit/drivers/modules/test_pxe.py
@@ -1022,15 +1022,23 @@ class PXEAnacondaDeployTestCase(db_base.DbTestCase):
mock_prepare_ks_config.assert_called_once_with(task, image_info,
anaconda_boot=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- def test_prepare(self, mock_prepare_instance):
+ def test_prepare(self, mock_prepare_instance, mock_build_instance):
+
node = self.node
node.provision_state = states.DEPLOYING
node.instance_info = {}
node.save()
+ updated_instance_info = {'image_url': 'foo'}
+ mock_build_instance.return_value = updated_instance_info
with task_manager.acquire(self.context, node.uuid) as task:
task.driver.deploy.prepare(task)
self.assertFalse(mock_prepare_instance.called)
+ mock_build_instance.assert_called_once_with(task)
+ node.refresh()
+ self.assertEqual(updated_instance_info, node.instance_info)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare_active(self, mock_prepare_instance):
diff --git a/releasenotes/notes/add-more-sources-redfish-firmware-update-3da89f10dc0f8d21.yaml b/releasenotes/notes/add-more-sources-redfish-firmware-update-3da89f10dc0f8d21.yaml
new file mode 100644
index 000000000..559ae2271
--- /dev/null
+++ b/releasenotes/notes/add-more-sources-redfish-firmware-update-3da89f10dc0f8d21.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+    For the ``redfish`` and ``idrac-redfish`` management interface
+    ``firmware_update`` clean step, adds support for Swift, HTTP service
+    and file system sources to serve firmware files, and for Ironic's HTTP
+    and Swift services to stage them. Also adds the mandatory parameter
+    ``checksum`` for file checksum verification.
+
+upgrade:
+ - |
+    The ``redfish`` and ``idrac-redfish`` management interface
+    ``firmware_update`` clean step now has a mandatory ``checksum``
+    parameter. Update existing clean steps to include it; otherwise the
+    clean step will fail with the error "'checksum' is a required property".
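A usage sketch of the change above, assuming the clean step is named
``update_firmware`` and takes a ``firmware_images`` argument as in the
Redfish firmware update workflow; the node, URL and checksum values are
placeholders::

    baremetal node clean <node> --clean-steps '[{
        "interface": "management",
        "step": "update_firmware",
        "args": {
            "firmware_images": [{
                "url": "http://fileserver/bmc-firmware.exe",
                "checksum": "<checksum-of-the-file>"
            }]
        }
    }]'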
diff --git a/releasenotes/notes/anaconda-config-drive-fixes-5880884e34584549.yaml b/releasenotes/notes/anaconda-config-drive-fixes-5880884e34584549.yaml
new file mode 100644
index 000000000..ca1a6a6e7
--- /dev/null
+++ b/releasenotes/notes/anaconda-config-drive-fixes-5880884e34584549.yaml
@@ -0,0 +1,19 @@
+---
+fixes:
+ - |
+ The anaconda deploy interface was treating the config drive
+    as a dict, whereas it could be either a dict or an iso9660 image,
+ gzipped and base64-encoded. This has been fixed.
+ - |
+    The anaconda deploy interface was appending the commands that handle
+    the config drive to the end of the kickstart config file. This meant
+    that they ran after the ironic API request was sent (to the
+    conductor) indicating that the node had been provisioned and was
+    ready to be rebooted, leaving a possible race condition between these
+    commands completing and the node being powered off. A sync is now
+    performed to ensure that all modifications have been written to disk
+    before the API request is sent as the last step.
+ - |
+ Extra newlines ('\n') were incorrectly added to the user data content.
+ This broke the content-type decoding and cloud-init was unable to
+    process them. The extra newlines have been removed.
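A minimal sketch of the two config drive forms that the first fix above
distinguishes; the helper name and checks are illustrative, not Ironic's
actual code::

    import base64
    import gzip

    def load_configdrive(configdrive):
        # Dicts hold metadata still to be rendered; anything else is
        # assumed to be a base64-encoded, gzipped ISO 9660 image.
        if isinstance(configdrive, dict):
            return configdrive
        return gzip.decompress(base64.b64decode(configdrive))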
diff --git a/releasenotes/notes/anaconda-deploy-more-fixes-58d996c7031c8c4b.yaml b/releasenotes/notes/anaconda-deploy-more-fixes-58d996c7031c8c4b.yaml
new file mode 100644
index 000000000..9298c3036
--- /dev/null
+++ b/releasenotes/notes/anaconda-deploy-more-fixes-58d996c7031c8c4b.yaml
@@ -0,0 +1,33 @@
+---
+fixes:
+ - |
+    Fixes the logic for the anaconda deploy interface. Previously, if
+    the ironic node's instance_info did not have both 'stage2' and
+    'ks_template' specified, the instance_info was not used at all.
+    This has been fixed to use the instance_info if it is specified.
+    Otherwise, 'stage2' is taken from the image's properties (it is
+    assumed to be set there), and the 'ks_template' value is taken from
+    the image properties if specified there (since it is optional);
+    else the config setting '[anaconda] default_ks_template' is used.
+ - |
+ For the anaconda deploy interface, the 'stage2' directory was
+ incorrectly being created using the full path of the stage2 file;
+ this has been fixed.
+ - |
+ The anaconda deploy interface expects the node's instance_info
+ to be populated with the 'image_url'; this is now populated
+ (via PXEAnacondaDeploy's prepare() method).
+ - |
+    For the anaconda deploy interface, when the deploy was finished
+    and the bare metal node was being rebooted, the node's provision
+    state was incorrectly being set to 'active'; the provisioning
+    state-machine mechanism now handles that.
+ - |
+ For the anaconda deploy interface, the code that was doing the
+ validation of the kickstart file was incorrect and resulted in
+ errors; this has been addressed.
+ - |
+ For the anaconda deploy interface, the '%traceback' section in the
+ packaged 'ks.cfg.template' file is deprecated and fails validation,
+ so it has been removed.
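For reference, the fallback mentioned in the first fix above is a plain
configuration option; a hypothetical ``ironic.conf`` snippet, with an
illustrative path::

    [anaconda]
    default_ks_template = /etc/ironic/ks.cfg.template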
diff --git a/releasenotes/notes/anaconda-instance-info-fix-a51837d8ac7b41de.yaml b/releasenotes/notes/anaconda-instance-info-fix-a51837d8ac7b41de.yaml
new file mode 100644
index 000000000..314d3ae9c
--- /dev/null
+++ b/releasenotes/notes/anaconda-instance-info-fix-a51837d8ac7b41de.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ The anaconda deploy interface was saving internal information in
+ the node's instance_info, in the user-facing 'stage2' and
+    'ks_template' fields. This broke rebuilds using a different image
+    with a different stage2 or template specified in the image properties.
+ This has been fixed by saving the information in the node's
+ driver_internal_info instead.
diff --git a/releasenotes/notes/api-none-3fdca1ccbb64d9b0.yaml b/releasenotes/notes/api-none-3fdca1ccbb64d9b0.yaml
new file mode 100644
index 000000000..d816be60d
--- /dev/null
+++ b/releasenotes/notes/api-none-3fdca1ccbb64d9b0.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Fixes pagination for the following collections::
+
+ /v1/allocations
+ /v1/chassis
+ /v1/conductors
+ /v1/deploy_templates
+ /v1/nodes/{node}/history
+
+ The ``next`` link now contains a valid URL.
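An illustration of the fixed behaviour, with placeholder host and UUID
values; the ``next`` URL now points back at the requested resource::

    GET /v1/allocations?limit=1

    {
        "allocations": ["..."],
        "next": "http://ironic.example.com:6385/v1/allocations?limit=1&marker=<last-uuid>"
    }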
diff --git a/releasenotes/notes/fix-redfish-raid-deploy-steps-e9ee1ea3d1f2a475.yaml b/releasenotes/notes/fix-redfish-raid-deploy-steps-e9ee1ea3d1f2a475.yaml
index 2c05b295e..a9ac8fbfb 100644
--- a/releasenotes/notes/fix-redfish-raid-deploy-steps-e9ee1ea3d1f2a475.yaml
+++ b/releasenotes/notes/fix-redfish-raid-deploy-steps-e9ee1ea3d1f2a475.yaml
@@ -3,5 +3,5 @@ fixes:
- |
Fixes hardware type ``redfish`` RAID interface deploy steps when completion
requires rebooting system for non-immediate configuration application.
- Prior to this fix such nodes would remain in ``wait call-back`` state
+ Prior to this fix, such nodes would remain in ``wait call-back`` state
indefinitely.
diff --git a/releasenotes/notes/fix-redfish-raid-failed-tasks-02487c4698dea176.yaml b/releasenotes/notes/fix-redfish-raid-failed-tasks-02487c4698dea176.yaml
new file mode 100644
index 000000000..9f84967c0
--- /dev/null
+++ b/releasenotes/notes/fix-redfish-raid-failed-tasks-02487c4698dea176.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes the determination of a failed RAID configuration task in the
+    ``redfish`` hardware type. Prior to this fix, tasks that had failed
+    were reported as successful.
diff --git a/releasenotes/notes/fix-redfish-raid-onreset-workflow-bfa44de6b0263a1f.yaml b/releasenotes/notes/fix-redfish-raid-onreset-workflow-bfa44de6b0263a1f.yaml
new file mode 100644
index 000000000..6cb8bdc1b
--- /dev/null
+++ b/releasenotes/notes/fix-redfish-raid-onreset-workflow-bfa44de6b0263a1f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+    Fixes ``redfish`` hardware type RAID device creation and deletion when
+    creating or deleting more than one logical disk on RAID controllers that
+    require rebooting and do not allow more than one running task per RAID
+    controller. Before this fix, the second logical disk would fail to be
+    created or deleted. With this change it is now possible to use the
+    ``redfish`` ``raid`` interface on iDRAC systems.
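For illustration, a target RAID configuration exercising this fix simply
contains more than one logical disk; the sizes and levels below are
placeholders::

    {
        "logical_disks": [
            {"size_gb": 100, "raid_level": "5", "is_root_volume": true},
            {"size_gb": 200, "raid_level": "1"}
        ]
    }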
diff --git a/releasenotes/notes/idrac-redfish-clean-steps-not-require-ramdisk-699e169af39b0dd6.yaml b/releasenotes/notes/idrac-redfish-clean-steps-not-require-ramdisk-699e169af39b0dd6.yaml
new file mode 100644
index 000000000..cfefc8128
--- /dev/null
+++ b/releasenotes/notes/idrac-redfish-clean-steps-not-require-ramdisk-699e169af39b0dd6.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Adds support for ``idrac-redfish`` RAID and management clean steps to be
+    run without IPA when the ramdisk is disabled during cleaning.
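A hedged usage sketch: manual cleaning without the IPA ramdisk is requested
through the cleaning API's ramdisk-disable option; with the CLI this is
expected to look as follows, with a placeholder node and an example step::

    baremetal node clean <node> --disable-ramdisk \
        --clean-steps '[{"interface": "raid", "step": "delete_configuration"}]'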
diff --git a/releasenotes/notes/idrac-wsman-clean-steps-not-require-ramdisk-ca98aa5c0a88f727.yaml b/releasenotes/notes/idrac-wsman-clean-steps-not-require-ramdisk-ca98aa5c0a88f727.yaml
new file mode 100644
index 000000000..fa478defe
--- /dev/null
+++ b/releasenotes/notes/idrac-wsman-clean-steps-not-require-ramdisk-ca98aa5c0a88f727.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Adds support for ``idrac-wsman`` RAID, BIOS and management clean steps to
+    be run without IPA when the ramdisk is disabled during cleaning.
diff --git a/releasenotes/notes/image-type-ac259a90393bdd2c.yaml b/releasenotes/notes/image-type-ac259a90393bdd2c.yaml
new file mode 100644
index 000000000..b693f060b
--- /dev/null
+++ b/releasenotes/notes/image-type-ac259a90393bdd2c.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Introduces a new explicit ``instance_info`` parameter ``image_type``,
+    which can be used to distinguish between partition and whole disk images
+    instead of relying on the presence of a ``kernel``/``ramdisk`` pair.
+
+ Adding ``kernel`` and ``ramdisk`` is no longer necessary for partition
+ images if ``image_type`` is set to ``partition`` and local boot is used.
+
+ The corresponding Image service property is called ``img_type``.
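A short usage example for the new parameter, with a placeholder node and
image URL::

    baremetal node set <node> \
        --instance-info image_source=http://server/my-image.qcow2 \
        --instance-info image_type=partition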
diff --git a/releasenotes/notes/initrd_filename-ac68e96f1c9fb576.yaml b/releasenotes/notes/initrd_filename-ac68e96f1c9fb576.yaml
new file mode 100644
index 000000000..b9eea262e
--- /dev/null
+++ b/releasenotes/notes/initrd_filename-ac68e96f1c9fb576.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    Fixes the ``initrd`` kernel parameter when booting a ramdisk directly
+    from Swift/RadosGW using iPXE. Previously it was always
+    ``deploy_ramdisk``, even when the actual file name was different.
diff --git a/releasenotes/notes/known-issue-idrac-firmware-swift-721a19cac796e1ae.yaml b/releasenotes/notes/known-issue-idrac-firmware-swift-721a19cac796e1ae.yaml
new file mode 100644
index 000000000..0b53e1e5c
--- /dev/null
+++ b/releasenotes/notes/known-issue-idrac-firmware-swift-721a19cac796e1ae.yaml
@@ -0,0 +1,8 @@
+---
+issues:
+ - |
+    When using iDRAC with Swift to stage firmware update files in the
+    management interface ``firmware_update`` clean step of the ``redfish``
+    or ``idrac`` hardware type, cleaning fails with the error "An internal
+    error occurred. Unable to complete the specified operation." in the
+    iDRAC job. Until this is fixed, use an HTTP service to stage firmware
+    files for iDRAC.
diff --git a/releasenotes/notes/netboot-deprecation-fe5751a47df2d0b7.yaml b/releasenotes/notes/netboot-deprecation-fe5751a47df2d0b7.yaml
new file mode 100644
index 000000000..9c8df1106
--- /dev/null
+++ b/releasenotes/notes/netboot-deprecation-fe5751a47df2d0b7.yaml
@@ -0,0 +1,14 @@
+---
+deprecations:
+ - |
+    Booting final instances over the network (as opposed to via a local
+    bootloader) is now deprecated, except for the cases of booting from
+    volume or using the ramdisk deploy interface.
+
+    Network boot of whole disk images only works reliably for legacy (BIOS)
+    boot. In the case of partition images, there is no way to update the
+    kernel, which makes this approach insecure.
+
+    Users of partition images must ensure that the images contain either
+    the ``grub-install`` binary, enough EFI artifacts to boot the operating
+    system, or a legacy boot partition.
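For operators moving off netboot, local boot can be requested explicitly via
the conventional ``boot_option`` capability; a hedged example with a
placeholder node::

    baremetal node set <node> --property capabilities='boot_option:local'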
diff --git a/releasenotes/notes/service-exit-77bcf3a538fab4bc.yaml b/releasenotes/notes/service-exit-77bcf3a538fab4bc.yaml
new file mode 100644
index 000000000..3d0aeee19
--- /dev/null
+++ b/releasenotes/notes/service-exit-77bcf3a538fab4bc.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Services (``ironic``, ``ironic-api``, ``ironic-conductor``) now correctly
+ return a non-zero exit code on start-up failures.
diff --git a/releasenotes/notes/service-wait-e85cbe7978f61764.yaml b/releasenotes/notes/service-wait-e85cbe7978f61764.yaml
new file mode 100644
index 000000000..c2fff601d
--- /dev/null
+++ b/releasenotes/notes/service-wait-e85cbe7978f61764.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ The ``ironic`` and ``ironic-conductor`` services now wait for the conductor
+ manager to start before notifying systemd about the successful start-up.
diff --git a/releasenotes/notes/swift_account_prefix-dbc9e68890bff47c.yaml b/releasenotes/notes/swift_account_prefix-dbc9e68890bff47c.yaml
new file mode 100644
index 000000000..c0a458f7d
--- /dev/null
+++ b/releasenotes/notes/swift_account_prefix-dbc9e68890bff47c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The new ``[glance] swift_account_prefix`` parameter has been added. This
+    parameter should be set according to the ``reseller_prefix`` parameter
+    in Swift's ``proxy-server.conf``.
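A hypothetical ``ironic.conf`` snippet; the value must mirror Swift's
``reseller_prefix``, shown here with Swift's usual default::

    [glance]
    swift_account_prefix = AUTH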
diff --git a/releasenotes/notes/unix-socket-48e8f1caf4cb19f9.yaml b/releasenotes/notes/unix-socket-48e8f1caf4cb19f9.yaml
new file mode 100644
index 000000000..14fefaf73
--- /dev/null
+++ b/releasenotes/notes/unix-socket-48e8f1caf4cb19f9.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Supports listening on a Unix socket instead of a normal TCP socket.
+ This is useful with an HTTP server such as nginx in proxy mode.
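A configuration sketch, assuming the option lives in the ``[api]`` section
under the name ``unix_socket``; verify against this release's configuration
reference before relying on it::

    [api]
    unix_socket = /run/ironic/ironic-api.socket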
diff --git a/releasenotes/notes/update-idrac-redfish-management-export-conf-9fb17273c4d9a050.yaml b/releasenotes/notes/update-idrac-redfish-management-export-conf-9fb17273c4d9a050.yaml
index a8753e2a7..ddae5b20c 100644
--- a/releasenotes/notes/update-idrac-redfish-management-export-conf-9fb17273c4d9a050.yaml
+++ b/releasenotes/notes/update-idrac-redfish-management-export-conf-9fb17273c4d9a050.yaml
@@ -1,7 +1,9 @@
---
features:
- |
- Updates ``idrac-redfish`` management interface export configuration step
- to not export iDRAC BMC connection settings to avoid overwriting those
- in another system when using unmodified configuration mold in import step.
- For import step it is still possible to add these settings back manually.
+    The export configuration step of the ``idrac-redfish`` management
+    interface no longer exports iDRAC BMC connection settings, to avoid
+    overwriting those of another system when an unmodified configuration
+    mold is used in the import step. For the import step it is still
+    possible to add these settings back manually.
diff --git a/tools/benchmark/generate-statistics.py b/tools/benchmark/generate-statistics.py
index c6a7197c5..740c3be08 100644
--- a/tools/benchmark/generate-statistics.py
+++ b/tools/benchmark/generate-statistics.py
@@ -116,6 +116,7 @@ def _assess_db_object_and_api_performance(mock_log, mock_request):
total_nodes = 0
res = node_api_controller._get_nodes_collection(
+ resource_url='nodes',
chassis_uuid=None,
instance_uuid=None,
associated=None,
@@ -132,6 +133,7 @@ def _assess_db_object_and_api_performance(mock_log, mock_request):
print(" ** Getting nodes ** %s Elapsed: %s seconds." %
(total_nodes, _calculate_delta(start, time.time())))
res = node_api_controller._get_nodes_collection(
+ resource_url='nodes',
chassis_uuid=None,
instance_uuid=None,
associated=None,
diff --git a/tox.ini b/tox.ini
index f33138d4f..f0e04357c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -133,7 +133,7 @@ max-complexity=19
# [H203] Use assertIs(Not)None to check for None.
# [H204] Use assert(Not)Equal to check for equality.
# [H205] Use assert(Greater|Less)(Equal) for comparison.
-# [H210] Require ‘autospec’, ‘spec’, or ‘spec_set’ in mock.patch/mock.patch.object calls
+# [H210] Require 'autospec', 'spec', or 'spec_set' in mock.patch/mock.patch.object calls
# [H904] Delay string interpolations at logging calls.
enable-extensions=H106,H203,H204,H205,H210,H904
# [E402] Module level import not at top of file
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index cbf45a2f2..ff7727b7a 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -248,9 +248,6 @@
IRONIC_ENABLED_POWER_INTERFACES: redfish
IRONIC_ENABLED_MANAGEMENT_INTERFACES: redfish
IRONIC_AUTOMATED_CLEAN_ENABLED: False
- # TODO(TheJulia): We need to excise netboot from
- # jobs at some point.
- IRONIC_DEFAULT_BOOT_OPTION: netboot
IRONIC_ENABLED_BOOT_INTERFACES: redfish-virtual-media
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
@@ -276,6 +273,7 @@
tempest_test_regex: test_baremetal_introspection
devstack_localrc:
IRONIC_BOOT_MODE: bios
+ IRONIC_DEFAULT_BOOT_OPTION: netboot
IRONIC_INSPECTOR_MANAGED_BOOT: True
IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK: ''
IRONIC_AUTOMATED_CLEAN_ENABLED: False
@@ -324,7 +322,7 @@
- job:
name: ironic-tempest-wholedisk-bios-snmp-pxe
- description: SNMP power, no-op management, netboot and whole disk images.
+ description: SNMP power, no-op management and whole disk images.
parent: ironic-base
vars:
devstack_localrc:
@@ -337,21 +335,20 @@
IRONIC_BOOT_MODE: bios
- job:
- name: ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool
+ name: ironic-tempest-partition-uefi-ipmi-pxe
+ description: IPMI power, UEFI, netboot, partition image.
parent: ironic-base
vars:
devstack_localrc:
- IRONIC_VM_SPECS_RAM: 4096
IRONIC_AUTOMATED_CLEAN_ENABLED: False
- IRONIC_DEFAULT_BOOT_OPTION: netboot
- job:
- name: ironic-tempest-ipa-partition-pxe_ipmitool
- description: ironic-tempest-ipa-partition-pxe_ipmitool that also tests cleaning.
+ name: ironic-tempest-partition-bios-ipmi-pxe
+ description: IPMI power, legacy BIOS, netboot, partition image and cleaning.
parent: ironic-base
vars:
devstack_localrc:
+ IRONIC_BOOT_MODE: bios
IRONIC_DEFAULT_BOOT_OPTION: netboot
IRONIC_AUTOMATED_CLEAN_ENABLED: True
@@ -607,6 +604,7 @@
devstack_localrc:
ENABLE_TENANT_TUNNELS: False
ENABLE_TENANT_VLANS: True
+ FORCE_CONFIG_DRIVE: True
HOST_TOPOLOGY: multinode
HOST_TOPOLOGY_ROLE: subnode
IRONIC_AUTOMATED_CLEAN_ENABLED: False
@@ -676,7 +674,6 @@
IRONIC_IPXE_ENABLED: False
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_AUTOMATED_CLEAN_ENABLED: False
- IRONIC_DEFAULT_BOOT_OPTION: netboot
IRONIC_VM_SPECS_RAM: 4096
- job:
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 869144ba3..5e054a26b 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -21,8 +21,8 @@
- ironic-tempest-partition-bios-redfish-pxe
- ironic-tempest-partition-uefi-redfish-vmedia
- ironic-tempest-wholedisk-bios-snmp-pxe
- - ironic-tempest-ipa-partition-pxe_ipmitool
- - ironic-tempest-ipa-partition-uefi-pxe_ipmitool
+ - ironic-tempest-partition-bios-ipmi-pxe
+ - ironic-tempest-partition-uefi-ipmi-pxe
# NOTE(TheJulia) Marking multinode non-voting on 20210311
# Due to a high failure rate on limestone where the compute1
# machine never appears to be able to communicate across the
@@ -67,8 +67,8 @@
- ironic-tempest-partition-bios-redfish-pxe
- ironic-tempest-partition-uefi-redfish-vmedia
- ironic-tempest-wholedisk-bios-snmp-pxe
- - ironic-tempest-ipa-partition-pxe_ipmitool
- - ironic-tempest-ipa-partition-uefi-pxe_ipmitool
+ - ironic-tempest-partition-bios-ipmi-pxe
+ - ironic-tempest-partition-uefi-ipmi-pxe
# NOTE(TheJulia): Disabled multinode on 20210311 due to Limestone
# seeming to be
# - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode