-rw-r--r--devstack/lib/ironic56
-rw-r--r--doc/source/admin/adoption.rst3
-rw-r--r--doc/source/admin/anaconda-deploy-interface.rst13
-rw-r--r--doc/source/admin/dhcp-less.rst11
-rw-r--r--doc/source/admin/drivers.rst1
-rw-r--r--doc/source/admin/drivers/fake.rst36
-rw-r--r--doc/source/admin/drivers/ilo.rst289
-rw-r--r--doc/source/admin/drivers/irmc.rst53
-rw-r--r--doc/source/admin/drivers/redfish.rst52
-rw-r--r--doc/source/admin/drivers/snmp.rst74
-rw-r--r--doc/source/admin/interfaces/deploy.rst2
-rw-r--r--doc/source/admin/ramdisk-boot.rst5
-rw-r--r--doc/source/admin/report.txt1
-rw-r--r--doc/source/admin/secure-rbac.rst13
-rw-r--r--doc/source/admin/troubleshooting.rst41
-rw-r--r--doc/source/contributor/webapi-version-history.rst12
-rw-r--r--doc/source/install/advanced.rst2
-rw-r--r--doc/source/install/include/disk-label.inc6
-rw-r--r--doc/source/install/include/local-boot-partition-images.inc56
-rw-r--r--doc/source/install/refarch/common.rst41
-rw-r--r--doc/source/user/architecture.rst5
-rw-r--r--doc/source/user/deploy.rst8
-rw-r--r--driver-requirements.txt4
-rw-r--r--ironic/api/controllers/v1/node.py56
-rw-r--r--ironic/api/controllers/v1/utils.py12
-rw-r--r--ironic/api/controllers/v1/versions.py5
-rwxr-xr-xironic/common/args.py17
-rw-r--r--ironic/common/exception.py10
-rw-r--r--ironic/common/policy.py19
-rw-r--r--ironic/common/pxe_utils.py102
-rw-r--r--ironic/common/release_mappings.py22
-rw-r--r--ironic/conductor/cleaning.py2
-rw-r--r--ironic/conductor/manager.py52
-rw-r--r--ironic/conductor/verify.py2
-rw-r--r--ironic/conf/anaconda.py11
-rw-r--r--ironic/conf/api.py5
-rw-r--r--ironic/conf/conductor.py26
-rw-r--r--ironic/conf/default.py2
-rw-r--r--ironic/conf/deploy.py18
-rw-r--r--ironic/conf/ilo.py5
-rw-r--r--ironic/conf/molds.py4
-rw-r--r--ironic/db/api.py9
-rw-r--r--ironic/db/sqlalchemy/__init__.py4
-rw-r--r--ironic/db/sqlalchemy/api.py24
-rw-r--r--ironic/drivers/ilo.py5
-rw-r--r--ironic/drivers/modules/agent.py36
-rw-r--r--ironic/drivers/modules/agent_base.py12
-rw-r--r--ironic/drivers/modules/agent_config.template13
-rw-r--r--ironic/drivers/modules/ansible/deploy.py6
-rw-r--r--ironic/drivers/modules/boot.ipxe6
-rw-r--r--ironic/drivers/modules/deploy_utils.py17
-rw-r--r--ironic/drivers/modules/drac/raid.py82
-rw-r--r--ironic/drivers/modules/ilo/boot.py27
-rw-r--r--ironic/drivers/modules/ilo/common.py42
-rw-r--r--ironic/drivers/modules/ilo/management.py79
-rw-r--r--ironic/drivers/modules/ilo/power.py5
-rw-r--r--ironic/drivers/modules/ilo/vendor.py43
-rw-r--r--ironic/drivers/modules/image_cache.py11
-rw-r--r--ironic/drivers/modules/image_utils.py13
-rw-r--r--ironic/drivers/modules/ipxe_config.template8
-rw-r--r--ironic/drivers/modules/irmc/boot.py5
-rw-r--r--ironic/drivers/modules/irmc/common.py53
-rw-r--r--ironic/drivers/modules/ks.cfg.template23
-rw-r--r--ironic/drivers/modules/network/neutron.py11
-rw-r--r--ironic/drivers/modules/pxe_base.py44
-rw-r--r--ironic/drivers/modules/pxe_config.template6
-rw-r--r--ironic/drivers/modules/pxe_grub_config.template5
-rw-r--r--ironic/drivers/modules/redfish/bios.py42
-rw-r--r--ironic/drivers/modules/redfish/boot.py6
-rw-r--r--ironic/drivers/modules/redfish/raid.py10
-rw-r--r--ironic/drivers/modules/redfish/utils.py61
-rw-r--r--ironic/drivers/modules/snmp.py339
-rw-r--r--ironic/tests/base.py83
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_node.py45
-rw-r--r--ironic/tests/unit/api/test_acl.py10
-rw-r--r--ironic/tests/unit/api/test_audit.py7
-rw-r--r--ironic/tests/unit/api/test_ospmiddleware.py4
-rw-r--r--ironic/tests/unit/api/test_rbac_project_scoped.yaml58
-rw-r--r--ironic/tests/unit/common/test_molds.py88
-rw-r--r--ironic/tests/unit/common/test_pxe_utils.py128
-rw-r--r--ironic/tests/unit/conductor/test_cleaning.py26
-rw-r--r--ironic/tests/unit/conductor/test_manager.py114
-rw-r--r--ironic/tests/unit/db/test_nodes.py36
-rw-r--r--ironic/tests/unit/drivers/boot-fallback.ipxe6
-rw-r--r--ironic/tests/unit/drivers/boot.ipxe6
-rw-r--r--ironic/tests/unit/drivers/ipxe_config.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_anaconda.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_volume_extra_volume.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_volume_no_extra_volumes.template8
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_timeout.template8
-rw-r--r--ironic/tests/unit/drivers/modules/ansible/test_deploy.py18
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_raid.py196
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_boot.py141
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_common.py94
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_management.py115
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_vendor.py71
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_boot.py108
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_common.py42
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_raid.py4
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_neutron.py60
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_bios.py35
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py26
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_management.py10
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_raid.py19
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_utils.py16
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent.py209
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent_base.py46
-rw-r--r--ironic/tests/unit/drivers/modules/test_deploy_utils.py275
-rw-r--r--ironic/tests/unit/drivers/modules/test_image_utils.py89
-rw-r--r--ironic/tests/unit/drivers/modules/test_ipxe.py328
-rw-r--r--ironic/tests/unit/drivers/modules/test_pxe.py274
-rw-r--r--ironic/tests/unit/drivers/modules/test_ramdisk.py23
-rw-r--r--ironic/tests/unit/drivers/modules/test_snmp.py82
-rw-r--r--ironic/tests/unit/drivers/pxe_config.template6
-rw-r--r--ironic/tests/unit/drivers/pxe_grub_config.template5
-rw-r--r--releasenotes/notes/ValueDisplayName-13837c653277ff08.yaml5
-rw-r--r--releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml5
-rw-r--r--releasenotes/notes/adds-kickstart-auto-url-in-template-9f716c244adff159.yaml5
-rw-r--r--releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml8
-rw-r--r--releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml23
-rw-r--r--releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml16
-rw-r--r--releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml7
-rw-r--r--releasenotes/notes/deprecate-syslinux-support-98d327c67607fc8e.yaml2
-rw-r--r--releasenotes/notes/fast-track-bios-fa9ae685c151dd24.yaml6
-rw-r--r--releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml8
-rw-r--r--releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml5
-rw-r--r--releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml26
-rw-r--r--releasenotes/notes/fix-pxe-glance-lookup-anaconda-86fe616c6286ec08.yaml6
-rw-r--r--releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml7
-rw-r--r--releasenotes/notes/irmc-add-certification-file-option-34e7a0062c768e58.yaml10
-rw-r--r--releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml5
-rw-r--r--releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml10
-rw-r--r--releasenotes/notes/no-netboot-d08f46c12edabd35.yaml6
-rw-r--r--releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml27
-rw-r--r--releasenotes/notes/override-external_http_url-per-node-f5423b00b373e528.yaml8
-rw-r--r--releasenotes/notes/prevent-pxe-retry-when-token-exists-a4f38f7da56c1397.yaml2
-rw-r--r--releasenotes/notes/ramdisk-deploy-384a38c3c96059dd.yaml6
-rw-r--r--releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml7
-rw-r--r--releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml10
-rw-r--r--releasenotes/notes/suppress_chassis_not_found_error-99ee4b902d504ec7.yaml9
-rw-r--r--releasenotes/notes/version-foo-2eb39b768112547f.yaml6
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po283
-rw-r--r--releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po159
-rw-r--r--requirements.txt4
-rw-r--r--tools/config/ironic-config-generator.conf1
-rw-r--r--tox.ini1
-rw-r--r--zuul.d/ironic-jobs.yaml55
-rw-r--r--zuul.d/project.yaml2
151 files changed, 3471 insertions, 2437 deletions
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index 90842cfaf..08cccce7a 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -681,12 +681,6 @@ if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
fi
fi
-# TODO(dtantsur): change this when we change the default value.
-IRONIC_DEFAULT_BOOT_OPTION=${IRONIC_DEFAULT_BOOT_OPTION:-local}
-if [ $IRONIC_DEFAULT_BOOT_OPTION != "netboot" ] && [ $IRONIC_DEFAULT_BOOT_OPTION != "local" ]; then
- die $LINENO "Supported values for IRONIC_DEFAULT_BOOT_OPTION are 'netboot' and 'local' only."
-fi
-
# TODO(pas-ha) find a way to (cross-)sign the custom CA bundle used by tls-proxy
# with default iPXE cert - for reference see http://ipxe.org/crypto
if is_service_enabled tls-proxy && [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then
@@ -1338,6 +1332,17 @@ function configure_ironic_networks {
configure_ironic_cleaning_network
echo_summary "Configuring Ironic rescue network"
configure_ironic_rescue_network
+ echo_summary "Configuring Neutron Private Subnet, if needed."
+ configure_ironic_private_subnet
+}
+
+function configure_ironic_private_subnet {
+ if [[ "${IRONIC_ANACONDA_IMAGE_REF:-}" != "" ]]; then
+ # NOTE(TheJulia): Anaconda needs DNS for FQDN resolution
+ # and devstack doesn't create this network with dns.
+ subnet_id=$(openstack --os-cloud $OS_CLOUD subnet show private-subnet -f value -c id)
+ openstack --os-cloud $OS_CLOUD subnet set --dns-nameserver 8.8.8.8 $subnet_id
+ fi
}
function configure_ironic_cleaning_network {
@@ -1411,7 +1416,8 @@ function configure_ironic_provision_network {
${net_segment_id:+--network-segment $net_segment_id} \
$IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
--gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
- --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
+ --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX \
+ --dns-nameserver 8.8.8.8 -f value -c id)"
else
# NOTE(TheJulia): Consider changing this to stateful to support UEFI once we move
# CI to Ubuntu Jammy as it will support v6 and v4 UEFI firmware driven boot ops.
@@ -1855,8 +1861,6 @@ function configure_ironic_conductor {
iniset $IRONIC_CONF_FILE dhcp dhcp_provider $IRONIC_DHCP_PROVIDER
- iniset $IRONIC_CONF_FILE deploy default_boot_option $IRONIC_DEFAULT_BOOT_OPTION
-
isolinux=$(find -L /usr -type f -name "isolinux.bin" | head -1)
if [[ -n "$isolinux" ]]; then
iniset $IRONIC_CONF_FILE DEFAULT isolinux_bin "$isolinux"
@@ -2903,8 +2907,7 @@ function upload_image_if_needed {
# Change the default image only if the provided settings prevent the
# default cirros image from working.
- if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" != True \
- && "$IRONIC_DEFAULT_BOOT_OPTION" == local ]]; then
+ if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" != True ]]; then
IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME
DEFAULT_IMAGE_NAME=$IRONIC_IMAGE_NAME
fi
@@ -3066,6 +3069,16 @@ function upload_baremetal_ironic_deploy {
iniset $IRONIC_CONF_FILE conductor deploy_ramdisk $IRONIC_DEPLOY_RAMDISK_ID
iniset $IRONIC_CONF_FILE conductor rescue_kernel $IRONIC_DEPLOY_KERNEL_ID
iniset $IRONIC_CONF_FILE conductor rescue_ramdisk $IRONIC_DEPLOY_RAMDISK_ID
+
+ if [[ "${IRONIC_ANACONDA_INSECURE_HEARTBEAT:-}" != "" ]]; then
+ iniset $IRONIC_CONF_FILE anaconda insecure_heartbeat ${IRONIC_ANACONDA_INSECURE_HEARTBEAT:-}
+ fi
+ # NOTE(TheJulia): Compared to an image deploy, anaconda is relatively
+ # slow as it installs packages one at a time. As such, we need an option
+ # to extend.
+ if [[ "${IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT:-}" != "" ]]; then
+ iniset $IRONIC_CONF_FILE conductor deploy_callback_timeout ${IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT:-}
+ fi
}
function prepare_baremetal_basic_ops {
@@ -3230,6 +3243,23 @@ function ironic_configure_tempest {
if [[ "$IRONIC_RAMDISK_IMAGE" != "" ]]; then
iniset $TEMPEST_CONFIG baremetal ramdisk_iso_image_ref "$IRONIC_RAMDISK_IMAGE"
fi
+ if [[ "${IRONIC_ANACONDA_IMAGE_REF:-}" != "" ]]; then
+ # In a perfect world we would use *just* the opendev repo
+ # mirror, and let things be magical, but OpenDev Infra cannot
+ # mirror the /images path with the limited storage space.
+ iniset $TEMPEST_CONFIG baremetal anaconda_image_ref ${IRONIC_ANACONDA_IMAGE_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_KERNEL_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_kernel_ref ${IRONIC_ANACONDA_KERNEL_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_RAMDISK_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_initial_ramdisk_ref ${IRONIC_ANACONDA_RAMDISK_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_STAGE2_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_stage2_ramdisk_ref ${IRONIC_ANACONDA_STAGE2_REF:-}
+
+ fi
+
# NOTE(dtantsur): keep this option here until the defaults change in
# ironic-tempest-plugin to disable classic drivers testing.
iniset $TEMPEST_CONFIG baremetal enabled_drivers ""
@@ -3242,7 +3272,9 @@ function ironic_configure_tempest {
# Driver for API tests
iniset $TEMPEST_CONFIG baremetal driver fake-hardware
- iniset $TEMPEST_CONFIG baremetal default_boot_option $IRONIC_DEFAULT_BOOT_OPTION
+ # NOTE(dtantsur): remove this when the tempest plugin no longer supports
+ # netboot (i.e. when Zed is the oldest supported branch).
+ iniset $TEMPEST_CONFIG baremetal default_boot_option local
local adjusted_root_disk_size_gb
if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
diff --git a/doc/source/admin/adoption.rst b/doc/source/admin/adoption.rst
index 570b36072..3a9d1d14a 100644
--- a/doc/source/admin/adoption.rst
+++ b/doc/source/admin/adoption.rst
@@ -135,8 +135,7 @@ from the ``manageable`` state to ``active`` state::
baremetal port create <node_mac_address> --node <node_uuid>
baremetal node set testnode \
- --instance-info image_source="http://localhost:8080/blankimage" \
- --instance-info capabilities="{\"boot_option\": \"local\"}"
+ --instance-info image_source="http://localhost:8080/blankimage"
baremetal node manage testnode --wait
diff --git a/doc/source/admin/anaconda-deploy-interface.rst b/doc/source/admin/anaconda-deploy-interface.rst
index e715a1b23..f48926668 100644
--- a/doc/source/admin/anaconda-deploy-interface.rst
+++ b/doc/source/admin/anaconda-deploy-interface.rst
@@ -187,7 +187,8 @@ as it accounts for the particular stages and appropriate callbacks to
Ironic.
.. warning::
- The default template expects a ``instance_info\liveimg_url`` setting to
+ The default template (for the kickstart 'liveimg' command) expects an
+ ``instance_info\image_info`` setting to
be provided by the user, which serves as a base operating system image.
In the context of the anaconda driver, it should be thought of almost
like "stage3". If you're using a custom template, it may not be required,
@@ -201,12 +202,13 @@ Ironic.
--instance_info ks_template=<URL>
If you do choose to use a liveimg with a customized template, or if you wish
-to use the stock template with a liveimg, you will need to provide parameter.
+to use the stock template with a liveimg, you will need to provide this
+setting.
.. code-block:: shell
baremetal node set <node> \
- --instance_info liveimg_url=<URL>
+ --instance_info image_info=<URL>
.. warning::
This is required if you do *not* utilize a customised template. As in use
@@ -275,5 +277,10 @@ Limitations
This deploy interface has only been tested with Red Hat based operating systems
that use anaconda. Other systems are not supported.
+Runtime TLS certificate injection into ramdisks is not supported. Assets such
+as the ``ramdisk`` or a ``stage2`` ramdisk image need to have trusted Certificate
+Authority certificates present within the images, *or* the Ironic API endpoint
+in use must utilize a certificate from a known, trusted Certificate Authority.
+
.. _`anaconda`: https://fedoraproject.org/wiki/Anaconda
.. _`ks.cfg.template`: https://opendev.org/openstack/ironic/src/branch/master/ironic/drivers/modules/ks.cfg.template
diff --git a/doc/source/admin/dhcp-less.rst b/doc/source/admin/dhcp-less.rst
index 089829e25..a14c2499d 100644
--- a/doc/source/admin/dhcp-less.rst
+++ b/doc/source/admin/dhcp-less.rst
@@ -127,3 +127,14 @@ the service catalog or configured in the ``[service_catalog]`` section:
[deploy]
external_callback_url = <Bare Metal API URL with a routable IP address>
+
+
+In case you need specific URLs for each node, you can use the
+``driver_info[external_http_url]`` node property. When used it overrides the
+``[deploy]http_url`` and ``[deploy]external_http_url`` settings in the
+configuration file.
+
+.. code-block:: bash
+
+ baremetal node set node-0 \
+ --driver-info external_http_url="<your_node_external_url>"
diff --git a/doc/source/admin/drivers.rst b/doc/source/admin/drivers.rst
index c3d8eb377..f35cb2dfa 100644
--- a/doc/source/admin/drivers.rst
+++ b/doc/source/admin/drivers.rst
@@ -26,6 +26,7 @@ Hardware Types
drivers/redfish
drivers/snmp
drivers/xclarity
+ drivers/fake
Changing Hardware Types and Interfaces
--------------------------------------
diff --git a/doc/source/admin/drivers/fake.rst b/doc/source/admin/drivers/fake.rst
new file mode 100644
index 000000000..ea7d7ef4c
--- /dev/null
+++ b/doc/source/admin/drivers/fake.rst
@@ -0,0 +1,36 @@
+===========
+Fake driver
+===========
+
+Overview
+========
+
+The ``fake-hardware`` hardware type is what it claims to be: fake. Use of this
+type or the ``fake`` interfaces should be temporary or limited to
+non-production environments, as the ``fake`` interfaces do not perform any of
+the actions typically expected.
+
+The ``fake`` interfaces can be configured to be combined with any of the
+"real" hardware interfaces, allowing you to effectively disable one or more
+hardware interfaces for testing by simply setting that interface to
+``fake``.
+
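+For example, a minimal sketch of disabling real deployment actions on a
+single node (assuming the ``direct`` deploy interface is otherwise in use in
+this deployment) could look like the following:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # Assumption: "direct" is the real deploy interface in this deployment.
+   enabled_deploy_interfaces = direct,fake
+
+.. code-block:: shell
+
+   baremetal node set <node> --deploy-interface fake
+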
+Use cases
+=========
+
+Development
+-----------
+Developers can use the ``fake-hardware`` hardware type to mock out nodes for
+testing without those nodes needing to exist with physical or virtual hardware.
+
+Adoption
+--------
+Some OpenStack deployers have used ``fake`` interfaces in Ironic to allow an
+adoption-style workflow with Nova. By setting a node's hardware interfaces to
+``fake``, it's possible to deploy to that node with Nova without causing any
+actual changes to the hardware or an OS already deployed on it.
+
+This is generally an unsupported use case, but it is possible. For more
+information, see the relevant `post from CERN TechBlog`_.
+
+.. _`post from CERN TechBlog`: https://techblog.web.cern.ch/techblog/post/ironic-nova-adoption/
diff --git a/doc/source/admin/drivers/ilo.rst b/doc/source/admin/drivers/ilo.rst
index 7a2fae8bc..65ff4f6da 100644
--- a/doc/source/admin/drivers/ilo.rst
+++ b/doc/source/admin/drivers/ilo.rst
@@ -55,6 +55,8 @@ The hardware type ``ilo`` supports following HPE server features:
* `Updating security parameters as manual clean step`_
* `Update Minimum Password Length security parameter as manual clean step`_
* `Update Authentication Failure Logging security parameter as manual clean step`_
+* `Create Certificate Signing Request(CSR) as manual clean step`_
+* `Add HTTPS Certificate as manual clean step`_
* `Activating iLO Advanced license as manual clean step`_
* `Removing CA certificates from iLO as manual clean step`_
* `Firmware based UEFI iSCSI boot from volume support`_
@@ -65,6 +67,7 @@ The hardware type ``ilo`` supports following HPE server features:
* `BIOS configuration support`_
* `IPv6 support`_
* `Layer 3 or DHCP-less ramdisk booting`_
+* `Events subscription`_
Apart from above features hardware type ``ilo5`` also supports following
features:
@@ -200,6 +203,18 @@ The ``ilo`` hardware type supports following hardware interfaces:
enabled_hardware_types = ilo
enabled_rescue_interfaces = agent,no-rescue
+* vendor
+ Supports ``ilo``, ``ilo-redfish`` and ``no-vendor``. The default is
+ ``ilo``. They can be enabled by using the
+ ``[DEFAULT]enabled_vendor_interfaces`` option in ``ironic.conf`` as given
+ below:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ enabled_hardware_types = ilo
+ enabled_vendor_interfaces = ilo,ilo-redfish,no-vendor
+
The ``ilo5`` hardware type supports all the ``ilo`` interfaces described above,
except for ``boot`` and ``raid`` interfaces. The details of ``boot`` and
@@ -296,13 +311,8 @@ Node configuration
- ``deploy_iso``: The glance UUID of the deploy ramdisk ISO image.
- ``instance info/boot_iso`` property to be either boot iso
- Glance UUID or a HTTP(S) URL. This is optional property and is used when
- ``boot_option`` is set to ``netboot`` or ``ramdisk``.
-
- .. note::
- When ``boot_option`` is set to ``ramdisk``, the ironic node must be
- configured to use ``ramdisk`` deploy interface. See :ref:`ramdisk-deploy`
- for details.
+     Glance UUID or a HTTP(S) URL. This is an optional property and is used with
+     :doc:`/admin/ramdisk-boot`.
.. note::
The ``boot_iso`` property used to be called ``ilo_boot_iso`` before
@@ -756,6 +766,12 @@ Supported **Manual** Cleaning Operations
``update_auth_failure_logging_threshold``:
Updates the Authentication Failure Logging security parameter. See
`Update Authentication Failure Logging security parameter as manual clean step`_ for user guidance on usage.
+ ``create_csr``:
+ Creates the certificate signing request. See `Create Certificate Signing Request(CSR) as manual clean step`_
+ for user guidance on usage.
+ ``add_https_certificate``:
+ Adds the signed HTTPS certificate to the iLO. See `Add HTTPS Certificate as manual clean step`_ for user
+ guidance on usage.
* iLO with firmware version 1.5 is minimally required to support all the
operations.
@@ -1043,9 +1059,7 @@ to send management information and images in encrypted channel over HTTPS.
Deploy Process
~~~~~~~~~~~~~~
-Please refer to `Netboot in swiftless deploy for intermediate images`_ for
-partition image support and `Localboot in swiftless deploy for intermediate images`_
-for whole disk image support.
+Please refer to `Swiftless deploy`_.
HTTP(S) Based Deploy Support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1063,8 +1077,7 @@ must be reachable by the conductor and the bare metal nodes.
Deploy Process
~~~~~~~~~~~~~~
-Please refer to `Netboot with HTTP(S) based deploy`_ for partition image boot
-and `Localboot with HTTP(S) based deploy`_ for whole disk image boot.
+Please refer to `HTTP(S) based deploy`_.
Support for iLO driver with Standalone Ironic
@@ -1084,58 +1097,8 @@ intermediate images on conductor as described in
Deploy Process
==============
-.. note::
- Network boot is deprecated and will be removed in the Zed release.
-
-.. TODO(dtantsur): review these diagrams to exclude netboot.
-
-Netboot with glance and swift
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. seqdiag::
- :scale: 80
-
- diagram {
- Glance; Conductor; Baremetal; Swift; IPA; iLO;
- activation = none;
- span_height = 1;
- edge_length = 250;
- default_note_color = white;
- default_fontsize = 14;
-
- Conductor -> iLO [label = "Powers off the node"];
- Conductor -> Glance [label = "Download user image"];
- Conductor -> Glance [label = "Get the metadata for deploy ISO"];
- Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"];
- Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"];
- Conductor -> Swift [label = "Uploads the FAT32 image"];
- Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"];
- Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"];
- Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets one time boot to CDROM"];
- Conductor -> iLO [label = "Reboot the node"];
- iLO -> Swift [label = "Downloads deploy ISO"];
- Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"];
- IPA -> Conductor [label = "Lookup node"];
- Conductor -> IPA [label = "Provides node UUID"];
- IPA -> Conductor [label = "Heartbeat"];
- Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
- IPA -> Swift [label = "Retrieves the user image on bare metal"];
- IPA -> IPA [label = "Writes user image to disk"];
- Conductor -> Conductor [label = "Generates the boot ISO"];
- Conductor -> Swift [label = "Uploads the boot ISO"];
- Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
- Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets boot device to CDROM"];
- Conductor -> IPA [label = "Power off the node"];
- Conductor -> iLO [label = "Power on the node"];
- iLO -> Swift [label = "Downloads boot ISO"];
- iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"];
- Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"];
- }
-
-Localboot with glance and swift for partition images
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Glance and swift for partition images
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. seqdiag::
:scale: 80
@@ -1176,8 +1139,8 @@ Localboot with glance and swift for partition images
}
-Localboot with glance and swift
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Glance and swift with whole-disk images
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. seqdiag::
:scale: 80
@@ -1216,52 +1179,8 @@ Localboot with glance and swift
Baremetal -> Baremetal [label = "Boot user image from disk"];
}
-Netboot in swiftless deploy for intermediate images
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. seqdiag::
- :scale: 80
-
- diagram {
- Glance; Conductor; Baremetal; ConductorWebserver; IPA; iLO;
- activation = none;
- span_height = 1;
- edge_length = 250;
- default_note_color = white;
- default_fontsize = 14;
-
- Conductor -> iLO [label = "Powers off the node"];
- Conductor -> Glance [label = "Download user image"];
- Conductor -> Glance [label = "Get the metadata for deploy ISO"];
- Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"];
- Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"];
- Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"];
- Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"];
- Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets one time boot to CDROM"];
- Conductor -> iLO [label = "Reboot the node"];
- iLO -> Swift [label = "Downloads deploy ISO"];
- Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"];
- IPA -> Conductor [label = "Lookup node"];
- Conductor -> IPA [label = "Provides node UUID"];
- IPA -> Conductor [label = "Heartbeat"];
- Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
- IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
- IPA -> IPA [label = "Writes user image to root partition"];
- Conductor -> Conductor [label = "Generates the boot ISO"];
- Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
- Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets boot device to CDROM"];
- Conductor -> IPA [label = "Power off the node"];
- Conductor -> iLO [label = "Power on the node"];
- iLO -> ConductorWebserver [label = "Downloads boot ISO"];
- iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"];
- Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"];
- }
-
-
-Localboot in swiftless deploy for intermediate images
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Swiftless deploy
+^^^^^^^^^^^^^^^^
.. seqdiag::
:scale: 80
@@ -1299,51 +1218,8 @@ Localboot in swiftless deploy for intermediate images
Baremetal -> Baremetal [label = "Boot user image from disk"];
}
-Netboot with HTTP(S) based deploy
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. seqdiag::
- :scale: 80
-
- diagram {
- Webserver; Conductor; Baremetal; Swift; IPA; iLO;
- activation = none;
- span_height = 1;
- edge_length = 250;
- default_note_color = white;
- default_fontsize = 14;
-
- Conductor -> iLO [label = "Powers off the node"];
- Conductor -> Webserver [label = "Download user image"];
- Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"];
- Conductor -> Swift [label = "Uploads the FAT32 image"];
- Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"];
- Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"];
- Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets one time boot to CDROM"];
- Conductor -> iLO [label = "Reboot the node"];
- iLO -> Webserver [label = "Downloads deploy ISO"];
- Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"];
- IPA -> Conductor [label = "Lookup node"];
- Conductor -> IPA [label = "Provides node UUID"];
- IPA -> Conductor [label = "Heartbeat"];
- Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
- IPA -> Swift [label = "Retrieves the user image on bare metal"];
- IPA -> IPA [label = "Writes user image to disk"];
- Conductor -> Conductor [label = "Generates the boot ISO"];
- Conductor -> Swift [label = "Uploads the boot ISO"];
- Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
- Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets boot device to CDROM"];
- Conductor -> IPA [label = "Power off the node"];
- Conductor -> iLO [label = "Power on the node"];
- iLO -> Swift [label = "Downloads boot ISO"];
- iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"];
- Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"];
- }
-
-Localboot with HTTP(S) based deploy
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+HTTP(S) based deploy
+^^^^^^^^^^^^^^^^^^^^
.. seqdiag::
:scale: 80
@@ -1379,49 +1255,8 @@ Localboot with HTTP(S) based deploy
Baremetal -> Baremetal [label = "Boot user image from disk"];
}
-Netboot in standalone ironic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. seqdiag::
- :scale: 80
-
- diagram {
- Webserver; Conductor; Baremetal; ConductorWebserver; IPA; iLO;
- activation = none;
- span_height = 1;
- edge_length = 250;
- default_note_color = white;
- default_fontsize = 14;
-
- Conductor -> iLO [label = "Powers off the node"];
- Conductor -> Webserver [label = "Download user image"];
- Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"];
- Conductor -> ConductorWebserver[label = "Uploads the FAT32 image"];
- Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"];
- Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets one time boot to CDROM"];
- Conductor -> iLO [label = "Reboot the node"];
- iLO -> Webserver [label = "Downloads deploy ISO"];
- Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"];
- IPA -> Conductor [label = "Lookup node"];
- Conductor -> IPA [label = "Provides node UUID"];
- IPA -> Conductor [label = "Heartbeat"];
- Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
- IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
- IPA -> IPA [label = "Writes user image to root partition"];
- Conductor -> Conductor [label = "Generates the boot ISO"];
- Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
- Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];
- Conductor -> iLO [label = "Sets boot device to CDROM"];
- Conductor -> IPA [label = "Power off the node"];
- Conductor -> iLO [label = "Power on the node"];
- iLO -> ConductorWebserver [label = "Downloads boot ISO"];
- iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"];
- Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"];
- }
-
-Localboot in standalone ironic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Standalone ironic
+^^^^^^^^^^^^^^^^^
.. seqdiag::
:scale: 80
@@ -1834,6 +1669,54 @@ Both the arguments ``logging_threshold`` and ``ignore`` are optional. The accept
value be False. If user passes the value of logging_threshold as 0, the Authentication Failure Logging security
parameter will be disabled.
+Create Certificate Signing Request(CSR) as manual clean step
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The iLO driver can invoke the ``create_csr`` request as a manual clean step. This step is only supported on iLO5 based hardware.
+
+An example of a manual clean step with ``create_csr`` as the only clean step could be::
+
+ "clean_steps": [{
+ "interface": "management",
+ "step": "create_csr",
+ "args": {
+ "csr_params": {
+ "City": "Bengaluru",
+ "CommonName": "1.1.1.1",
+ "Country": "India",
+ "OrgName": "HPE",
+ "State": "Karnataka"
+ }
+ }
+ }]
+
+The ``[ilo]cert_path`` option in ``ironic.conf`` is used as the directory path for
+creating the CSR, and defaults to ``/var/lib/ironic/ilo``. The CSR is created under
+the ``[ilo]cert_path`` directory, in a per-node ``node_uuid`` subdirectory, as <node_uuid>.csr.
+
+
+Add HTTPS Certificate as manual clean step
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The iLO driver can invoke the ``add_https_certificate`` request as a manual clean step. This step is only supported
+on iLO5 based hardware.
+
+An example of a manual clean step with ``add_https_certificate`` as the only clean step could be::
+
+ "clean_steps": [{
+ "interface": "management",
+ "step": "add_https_certificate",
+ "args": {
+ "cert_file": "/test1/iLO.crt"
+ }
+ }]
+
+The ``cert_file`` argument is mandatory and takes the path or URL of the certificate file.
+The URL schemes supported are ``file``, ``http`` and ``https``.
+The CSR generated by the ``create_csr`` step needs to be signed by a valid CA, and the resulting HTTPS certificate
+should be provided in ``cert_file``. The driver copies ``cert_file`` to ``[ilo]cert_path`` under ``node.uuid`` as
+<node_uuid>.crt before adding it to the iLO.
+
RAID Support
^^^^^^^^^^^^
@@ -2322,6 +2205,12 @@ DHCP-less deploy is supported by ``ilo`` and ``ilo5`` hardware types.
However it would work only with ilo-virtual-media boot interface. See
:doc:`/admin/dhcp-less` for more information.
+Events subscription
+^^^^^^^^^^^^^^^^^^^
+Events subscription is supported by ``ilo`` and ``ilo5`` hardware types with
+``ilo`` vendor interface for Gen10 and Gen10 Plus servers. See
+:ref:`node-vendor-passthru-methods` for more information.
+
.. _`ssacli documentation`: https://support.hpe.com/hpsc/doc/public/display?docId=c03909334
.. _`proliant-tools`: https://docs.openstack.org/diskimage-builder/latest/elements/proliant-tools/README.html
.. _`HPE iLO4 User Guide`: https://h20566.www2.hpe.com/hpsc/doc/public/display?docId=c03334051
diff --git a/doc/source/admin/drivers/irmc.rst b/doc/source/admin/drivers/irmc.rst
index 15d245c10..17b8d8644 100644
--- a/doc/source/admin/drivers/irmc.rst
+++ b/doc/source/admin/drivers/irmc.rst
@@ -111,6 +111,9 @@ Here is a command example to enroll a node with ``irmc`` hardware type.
Node configuration
^^^^^^^^^^^^^^^^^^
+Configuration via ``driver_info``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
* Each node is configured for ``irmc`` hardware type by setting the following
ironic node object's properties:
@@ -126,14 +129,52 @@ Node configuration
UEFI Secure Boot is required. Please refer to `UEFI Secure Boot Support`_
for more information.
+* If ``port`` in ``[irmc]`` section of ``/etc/ironic/ironic.conf`` or
+ ``driver_info/irmc_port`` is set to 443, ``driver_info/irmc_verify_ca``
+ will take effect:
+
+  The ``driver_info/irmc_verify_ca`` property accepts one of four values
+  (the default is ``True``); an example of setting it is shown after this
+  list:
+
+  - ``True``: The certificate bundle the iRMC driver uses is determined by
+    the ``requests`` Python module.
+
+    The value of ``driver_info/irmc_verify_ca`` is passed to the ``verify``
+    argument of functions in the ``requests`` module, so which certificates
+    are used depends on the behavior of ``requests`` (typically the bundle
+    provided by the ``certifi`` Python module).
+
+  - ``False``: The iRMC driver will not verify the server certificate
+    during HTTPS connections to the iRMC. The connection still uses HTTPS,
+    just without certificate verification.
+
+    .. warning::
+       Setting this to ``False`` can result in a vulnerable deployment.
+       Skipping verification of the server certificate during HTTPS
+       connections leaves the connection open to man-in-the-middle attacks.
+       If you set this to ``False``, take appropriate care to secure the
+       surrounding infrastructure (e.g. make sure the network between the
+       Ironic conductor and the iRMC is trusted).
+
+  - A string representing a filesystem path to a directory containing
+    certificate files: the iRMC driver uses the certificates stored in the
+    specified directory. The Ironic conductor must be able to access that
+    directory, and for the certificate files to be recognized you must run
+    ``openssl rehash <path_to_dir>``.
+
+  - A string representing a filesystem path to a certificate file: the
+    iRMC driver uses the specified certificate file. The Ironic conductor
+    must have access to that file.
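+
+  As a minimal sketch, pointing the driver at a specific CA certificate
+  file could look like the following (the path is an illustrative
+  placeholder, not a real default):
+
+  .. code-block:: shell
+
+     # The CA bundle path below is hypothetical; use the one appropriate
+     # to your iRMC environment.
+     baremetal node set <node> \
+         --driver-info irmc_verify_ca=/etc/ironic/irmc-cacert.pem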
+
+
* The following properties are also required if ``irmc-virtual-media`` boot
interface is used:
- ``driver_info/deploy_iso`` property to be either deploy iso
file name, Glance UUID, or Image Service URL.
- ``instance info/boot_iso`` property to be either boot iso
- file name, Glance UUID, or Image Service URL. This is optional
- property when ``boot_option`` is set to ``netboot``.
+ file name, Glance UUID, or Image Service URL. This is used
+ with the ``ramdisk`` deploy interface.
.. note::
The ``deploy_iso`` and ``boot_iso`` properties used to be called
@@ -150,6 +191,9 @@ Node configuration
- ``driver_info/irmc_snmp_priv_password`` property to be the privacy protocol
pass phrase. The length of pass phrase should be at least 8 characters.
+Configuration via ``ironic.conf``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
* All of the nodes are configured by setting the following configuration
options in the ``[irmc]`` section of ``/etc/ironic/ironic.conf``:
@@ -198,6 +242,10 @@ Node configuration
``driver_info/irmc_snmp_user`` parameter for each node if SNMPv3
inspection is needed.
+
+Override ``ironic.conf`` configuration via ``driver_info``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
* Each node can be further configured by setting the following ironic
node object's properties which override the parameter values in
``[irmc]`` section of ``/etc/ironic/ironic.conf``:
@@ -215,6 +263,7 @@ Node configuration
- ``driver_info/irmc_snmp_priv_proto`` property overrides
``snmp_priv_proto``.
+
Optional functionalities for the ``irmc`` hardware type
=======================================================
diff --git a/doc/source/admin/drivers/redfish.rst b/doc/source/admin/drivers/redfish.rst
index dd19f8bde..eb1f561f4 100644
--- a/doc/source/admin/drivers/redfish.rst
+++ b/doc/source/admin/drivers/redfish.rst
@@ -87,8 +87,18 @@ field:
The "auto" mode first tries "session" and falls back
to "basic" if session authentication is not supported
by the Redfish BMC. Default is set in ironic config
- as ``[redfish]auth_type``.
+ as ``[redfish]auth_type``. Most operators should not
+ need to leverage this setting. Session based
+ authentication should generally be used in most
+ cases as it prevents re-authentication every time
+ a background task checks in with the BMC.
+.. note::
+ The ``redfish_address``, ``redfish_username``, ``redfish_password``,
+ and ``redfish_verify_ca`` fields, if changed, will trigger a new session
+   to be established and cached with the BMC. The ``redfish_auth_type`` field
+   is only used when a new cached session is created, or if an existing
+   session is rejected by the BMC.
The ``baremetal node create`` command can be used to enroll
a node with the ``redfish`` driver. For example:
@@ -533,6 +543,8 @@ settings. The following fields will be returned in the BIOS API
"``unique``", "The setting is specific to this node"
"``reset_required``", "After changing this setting a node reboot is required"
+.. _node-vendor-passthru-methods:
+
Node Vendor Passthru Methods
============================
@@ -620,6 +632,44 @@ Eject Virtual Media
"boot_device (optional)", "body", "string", "Type of the device to eject (all devices by default)"
+Internal Session Cache
+======================
+
+The ``redfish`` hardware type, and derived interfaces, utilizes a built-in
+session cache which prevents Ironic from re-authenticating every time
+Ironic attempts to connect to the BMC for any reason.
+
+This consists of cached connector objects which are tracked by the unique
+combination of ``redfish_username``, ``redfish_password``,
+``redfish_verify_ca``, and ``redfish_address``. Changing any one
+of those values will trigger a new session to be created.
+The ``redfish_system_id`` value is explicitly not considered, as Redfish
+supports a model where one BMC serves many systems, which is also a model
+Ironic supports.
+
+The session cache default size is ``1000`` sessions per conductor.
+If you are operating a deployment with a larger number of Redfish
+BMCs, you are advised to tune that number appropriately. This can be
+done via the ``[redfish]connection_cache_size`` option in the service
+configuration file.
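+
+A minimal sketch of raising the cache size in ``ironic.conf`` (the value
+below is an arbitrary example, not a recommendation):
+
+.. code-block:: ini
+
+   [redfish]
+   # Example value only; size the cache to roughly the number of distinct
+   # BMCs managed by this conductor.
+   connection_cache_size = 5000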
+
+Session Cache Expiration
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, sessions remain cached in memory for as long as possible,
+provided they have not experienced an authentication, connection, or
+other unexplained error.
+
+Under normal circumstances, sessions are only rolled out of the cache,
+oldest first, when the cache becomes full. There is no time-based
+expiration of entries in the session cache.
+
+Of course, the cache is only in memory, and restarting the
+``ironic-conductor`` will also cause the cache to be rebuilt
+from scratch. If sessions are repeatedly invalidated due to a persistent
+connectivity issue, this may be a sign of an unexpected condition;
+please consider contacting the Ironic developer community for assistance.
+
.. _Redfish: http://redfish.dmtf.org/
.. _Sushy: https://opendev.org/openstack/sushy
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
diff --git a/doc/source/admin/drivers/snmp.rst b/doc/source/admin/drivers/snmp.rst
index 1c402ab9b..eed4ed794 100644
--- a/doc/source/admin/drivers/snmp.rst
+++ b/doc/source/admin/drivers/snmp.rst
@@ -22,39 +22,47 @@ this table could possibly work using a similar driver.
Please report any device status.
-============== ========== ========== =====================
-Manufacturer Model Supported? Driver name
-============== ========== ========== =====================
-APC AP7920 Yes apc_masterswitch
-APC AP9606 Yes apc_masterswitch
-APC AP9225 Yes apc_masterswitchplus
-APC AP7155 Yes apc_rackpdu
-APC AP7900 Yes apc_rackpdu
-APC AP7901 Yes apc_rackpdu
-APC AP7902 Yes apc_rackpdu
-APC AP7911a Yes apc_rackpdu
-APC AP7921 Yes apc_rackpdu
-APC AP7922 Yes apc_rackpdu
-APC AP7930 Yes apc_rackpdu
-APC AP7931 Yes apc_rackpdu
-APC AP7932 Yes apc_rackpdu
-APC AP7940 Yes apc_rackpdu
-APC AP7941 Yes apc_rackpdu
-APC AP7951 Yes apc_rackpdu
-APC AP7960 Yes apc_rackpdu
-APC AP7990 Yes apc_rackpdu
-APC AP7998 Yes apc_rackpdu
-APC AP8941 Yes apc_rackpdu
-APC AP8953 Yes apc_rackpdu
-APC AP8959 Yes apc_rackpdu
-APC AP8961 Yes apc_rackpdu
-APC AP8965 Yes apc_rackpdu
-Aten all? Yes aten
-CyberPower all? Untested cyberpower
-EatonPower all? Untested eatonpower
-Teltronix all? Yes teltronix
-BayTech MRP27 Yes baytech_mrp27
-============== ========== ========== =====================
+============== ============== ========== =====================
+Manufacturer Model Supported? Driver name
+============== ============== ========== =====================
+APC AP7920 Yes apc_masterswitch
+APC AP9606 Yes apc_masterswitch
+APC AP9225 Yes apc_masterswitchplus
+APC AP7155 Yes apc_rackpdu
+APC AP7900 Yes apc_rackpdu
+APC AP7901 Yes apc_rackpdu
+APC AP7902 Yes apc_rackpdu
+APC AP7911a Yes apc_rackpdu
+APC AP7921 Yes apc_rackpdu
+APC AP7922 Yes apc_rackpdu
+APC AP7930 Yes apc_rackpdu
+APC AP7931 Yes apc_rackpdu
+APC AP7932 Yes apc_rackpdu
+APC AP7940 Yes apc_rackpdu
+APC AP7941 Yes apc_rackpdu
+APC AP7951 Yes apc_rackpdu
+APC AP7960 Yes apc_rackpdu
+APC AP7990 Yes apc_rackpdu
+APC AP7998 Yes apc_rackpdu
+APC AP8941 Yes apc_rackpdu
+APC AP8953 Yes apc_rackpdu
+APC AP8959 Yes apc_rackpdu
+APC AP8961 Yes apc_rackpdu
+APC AP8965 Yes apc_rackpdu
+Aten all? Yes aten
+CyberPower all? Untested cyberpower
+EatonPower all? Untested eatonpower
+Teltronix all? Yes teltronix
+BayTech MRP27 Yes baytech_mrp27
+Raritan PX3-5547V-V2 Yes raritan_pdu2
+Raritan PX3-5726V Yes raritan_pdu2
+Raritan PX3-5776U-N2 Yes raritan_pdu2
+Raritan PX3-5969U-V2 Yes raritan_pdu2
+Raritan PX3-5961I2U-V2 Yes raritan_pdu2
+Vertiv NU30212 Yes vertivgeist_pdu
+ServerTech CW-16VE-P32M Yes servertech_sentry3
+ServerTech C2WG24SN Yes servertech_sentry4
+============== ============== ========== =====================
Software Requirements
diff --git a/doc/source/admin/interfaces/deploy.rst b/doc/source/admin/interfaces/deploy.rst
index f2ee5d95d..7db5a24ff 100644
--- a/doc/source/admin/interfaces/deploy.rst
+++ b/doc/source/admin/interfaces/deploy.rst
@@ -131,8 +131,6 @@ Anaconda deploy
The ``anaconda`` deploy interface is another option for highly customized
deployments. See :doc:`/admin/anaconda-deploy-interface` for more details.
-.. _ramdisk-deploy:
-
Ramdisk deploy
==============
diff --git a/doc/source/admin/ramdisk-boot.rst b/doc/source/admin/ramdisk-boot.rst
index 29708b781..5e9900f08 100644
--- a/doc/source/admin/ramdisk-boot.rst
+++ b/doc/source/admin/ramdisk-boot.rst
@@ -11,8 +11,9 @@ It is suported by ``pxe``, ``ipxe``, ``redfish-virtual-media`` and
Configuration
-------------
-Ramdisk/ISO boot requires using the ``ramdisk`` deploy interface. As with most
-non-default interfaces, it must be enabled and set for a node to be utilized:
+Ramdisk/ISO boot requires using the ``ramdisk`` deploy interface. It is enabled
+by default starting with the Zed release cycle. On an earlier release, it must
+be enabled explicitly:
.. code-block:: ini
diff --git a/doc/source/admin/report.txt b/doc/source/admin/report.txt
index 1f1fc4d8e..a1c96e2cc 100644
--- a/doc/source/admin/report.txt
+++ b/doc/source/admin/report.txt
@@ -321,7 +321,6 @@ default:
deploy:
continue_if_disk_secure_erase_fails = False
- default_boot_option = local
erase_devices_metadata_priority = None
erase_devices_priority = 0
http_root = /opt/stack/data/ironic/httpboot
diff --git a/doc/source/admin/secure-rbac.rst b/doc/source/admin/secure-rbac.rst
index 639cfcb23..7721211b6 100644
--- a/doc/source/admin/secure-rbac.rst
+++ b/doc/source/admin/secure-rbac.rst
@@ -267,3 +267,16 @@ restrictive and an ``owner`` may revoke access to ``lessee``.
Access to the underlying baremetal node is not exclusive between the
``owner`` and ``lessee``, and this use model expects that some level of
communication takes place between the appropriate parties.
+
+Can I, a project admin, create a node?
+--------------------------------------
+
+Starting in API version ``1.80``, the capability was added to allow
+users with an ``admin`` role on a project to create and delete their
+own nodes in Ironic.
+
+This functionality is enabled by default, and automatically
+imparts ``owner`` privileges to the created Bare Metal node.
+
+This functionality can be disabled by setting
+``[api]project_admin_can_manage_own_nodes`` to ``False``.
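+
+As a minimal sketch, disabling this behavior in ``ironic.conf`` could look
+like the following (the option name comes from the text above):
+
+.. code-block:: ini
+
+   [api]
+   # Disallow project-scoped admins from creating or deleting their own nodes.
+   project_admin_can_manage_own_nodes = False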
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index 8cf49392f..2791430fd 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -138,7 +138,7 @@ A few things should be checked in this case:
+------------+----------------------------------+
| Field | Value |
+------------+----------------------------------+
- | properties | capabilities:boot_option='local' |
+ | properties | capabilities:boot_mode='uefi' |
+------------+----------------------------------+
But in Ironic node::
@@ -147,7 +147,7 @@ A few things should be checked in this case:
+------------+-----------------------------------------+
| Property | Value |
+------------+-----------------------------------------+
- | properties | {u'capabilities': u'boot_option:local'} |
+ | properties | {u'capabilities': u'boot_mode:uefi'} |
+------------+-----------------------------------------+
#. After making changes to nodes in Ironic, it takes time for those changes
@@ -973,3 +973,40 @@ Unfortunately, due to the way the conductor is designed, it is not possible to
gracefully break a stuck lock held in ``*-ing`` states. As the last resort, you
may need to restart the affected conductor. See `Why are my nodes stuck in a
"-ing" state?`_.
+
+What is ConcurrentActionLimit?
+==============================
+
+ConcurrentActionLimit is an exception which is raised to clients when an
+operation is requested, but cannot be serviced at that moment because the
+overall threshold of nodes in concurrent "Deployment" or "Cleaning"
+operations has been reached.
+
+These limits exist for two distinct reasons.
+
+The first is that they allow an operator to tune a deployment so that too
+many concurrent deployments cannot be triggered at any given time. A single
+conductor has an internal limit on the number of overall concurrent tasks,
+but that limit only restricts the number of *running* actions, whereas this
+limit accounts for the number of nodes in ``deploy`` and ``deploy wait``
+states. In the case of deployments, the default value is relatively high
+and should be suitable for *most* larger operators.
+
+The second is to help slow down the rate at which an entire population of
+baremetal nodes can be moved into and through cleaning, in order to help
+guard against authenticated malicious users or accidental script-driven
+operations. In this case, the total number of nodes in ``deleting``,
+``cleaning``, and ``clean wait`` states is evaluated. The default maximum
+limit for cleaning operations is *50* and should be suitable for the
+majority of baremetal operators.
+
+These limits can be modified using the
+``[conductor]max_concurrent_deploy`` and ``[conductor]max_concurrent_clean``
+settings in the ironic.conf file used by the ``ironic-conductor``
+service. Neither setting can be explicitly disabled; however, there is also
+no upper limit to either setting.
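+
+As an illustrative sketch only (the values below are arbitrary examples,
+not recommendations), both thresholds can be raised in ``ironic.conf``:
+
+.. code-block:: ini
+
+   [conductor]
+   # Example values only; tune these to the size of your deployment.
+   max_concurrent_deploy = 500
+   max_concurrent_clean = 100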
+
+.. note::
+ This was an infrastructure operator requested feature from actual lessons
+ learned in the operation of Ironic in large scale production. The defaults
+ may not be suitable for the largest scale operators.
diff --git a/doc/source/contributor/webapi-version-history.rst b/doc/source/contributor/webapi-version-history.rst
index f90cb3c6c..c395bdcbe 100644
--- a/doc/source/contributor/webapi-version-history.rst
+++ b/doc/source/contributor/webapi-version-history.rst
@@ -2,8 +2,20 @@
REST API Version History
========================
+1.80 (Zed)
+----------
+
+This version signifies additional RBAC functionality allowing
+a project-scoped ``admin`` to create or delete nodes in Ironic.
+
+1.79 (Zed, 21.0)
+----------------------
+A node with the same name as the allocation ``name`` is moved to the
+start of the derived candidate list.
+
1.78 (Xena, 18.2)
----------------------
+
Add endpoints to allow history events for nodes to be retrieved via
the REST API.
diff --git a/doc/source/install/advanced.rst b/doc/source/install/advanced.rst
index ebb0b99e2..8e8c8fa50 100644
--- a/doc/source/install/advanced.rst
+++ b/doc/source/install/advanced.rst
@@ -3,8 +3,6 @@
Advanced features
=================
-.. include:: include/local-boot-partition-images.inc
-
.. include:: include/root-device-hints.inc
.. include:: include/kernel-boot-parameters.inc
diff --git a/doc/source/install/include/disk-label.inc b/doc/source/install/include/disk-label.inc
index fb144ba37..4a0fa9b02 100644
--- a/doc/source/install/include/disk-label.inc
+++ b/doc/source/install/include/disk-label.inc
@@ -40,6 +40,12 @@ and sections will describe both methods:
* Only one disk label - either ``msdos`` or ``gpt`` - can be configured
for the node.
+.. warning::
+   If the host is in UEFI boot mode, use of ``disk_label`` is redundant,
+   and may cause deployments to fail unexpectedly if the node is *not*
+   explicitly set to boot in UEFI mode. Use of the appropriate boot mode is
+   highly recommended.
+
When used with Compute service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/install/include/local-boot-partition-images.inc b/doc/source/install/include/local-boot-partition-images.inc
deleted file mode 100644
index 32ee200ce..000000000
--- a/doc/source/install/include/local-boot-partition-images.inc
+++ /dev/null
@@ -1,56 +0,0 @@
-.. _local-boot-partition-images:
-
-Local boot with partition images
---------------------------------
-
-The Bare Metal service supports local boot with partition images, meaning that
-after the deployment the node's subsequent reboots won't happen via PXE or
-Virtual Media. Instead, it will boot from a local boot loader installed on
-the disk.
-
-.. note:: Whole disk images, on the contrary, support only local boot, and use
- it by default.
-
-It's important to note that in order for this to work the image being
-deployed with Bare Metal service **must** contain ``grub2`` installed within it.
-
-Enabling the local boot is different when Bare Metal service is used with
-Compute service and without it.
-The following sections will describe both methods.
-
-.. _ironic-python-agent: https://docs.openstack.org/ironic-python-agent/latest/
-
-
-Enabling local boot with Compute service
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To enable local boot we need to set a capability on the bare metal node,
-for example::
-
- baremetal node set <node-uuid> --property capabilities="boot_option:local"
-
-
-Nodes having ``boot_option`` set to ``local`` may be requested by adding
-an ``extra_spec`` to the Compute service flavor, for example::
-
- openstack flavor set baremetal --property capabilities:boot_option="local"
-
-
-.. note::
- If the node is configured to use ``UEFI``, Bare Metal service will create
- an ``EFI partition`` on the disk and switch the partition table format to
- ``gpt``. The ``EFI partition`` will be used later by the boot loader
- (which is installed from the deploy ramdisk).
-
-.. _local-boot-without-compute:
-
-Enabling local boot without Compute
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Since adding ``capabilities`` to the node's properties is only used by
-the nova scheduler to perform more advanced scheduling of instances,
-we need a way to enable local boot when Compute is not present. To do that
-we can simply specify the capability via the ``instance_info`` attribute
-of the node, for example::
-
- baremetal node set <node-uuid> --instance-info capabilities='{"boot_option": "local"}'
diff --git a/doc/source/install/refarch/common.rst b/doc/source/install/refarch/common.rst
index ec2a0dbbb..800632fd5 100644
--- a/doc/source/install/refarch/common.rst
+++ b/doc/source/install/refarch/common.rst
@@ -135,44 +135,9 @@ The Bare Metal service can deploy two types of images:
.. warning::
Partition images are only supported with GNU/Linux operating systems.
- .. warning::
- If you plan on using local boot, your partition images must contain GRUB2
- bootloader tools to enable ironic to set up the bootloader during deploy.
-
-Local vs network boot
----------------------
-
-The Bare Metal service supports booting user instances either using a local
-bootloader or using the driver's boot interface (e.g. via PXE_ or iPXE_
-protocol in case of the ``pxe`` interface).
-
-Network boot cannot be used with certain architectures (for example, when no
-tenant networks have access to the control plane).
-
-Additional considerations are related to the ``pxe`` boot interface, and other
-boot interfaces based on it:
-
-* Local boot makes node's boot process independent of the Bare Metal conductor
- managing it. Thus, nodes are able to reboot correctly, even if the Bare Metal
- TFTP or HTTP service is down.
-
-* Network boot (and iPXE) must be used when booting nodes from remote volumes,
- if the driver does not support attaching volumes out-of-band.
-
-The default boot option for the cloud can be changed via the Bare Metal service
-configuration file, for example:
-
-.. code-block:: ini
-
- [deploy]
- default_boot_option = local
-
-This default can be overridden by setting the ``boot_option`` capability on a
-node. See :ref:`local-boot-partition-images` for details.
-
-.. note::
- Currently, local boot is used by default. It's safer to set
- the ``default_boot_option`` explicitly.
+ For the Bare Metal service to set up the bootloader during deploy, your
+ partition images must contain either the GRUB2 bootloader or ready-to-use
+ EFI artifacts.
.. _refarch-common-networking:
diff --git a/doc/source/user/architecture.rst b/doc/source/user/architecture.rst
index de86f47de..1b034fbb4 100644
--- a/doc/source/user/architecture.rst
+++ b/doc/source/user/architecture.rst
@@ -158,8 +158,9 @@ remain the same.
#. The ironic node's management interface issues commands to enable network
boot of a node.
-#. The ironic node's deploy interface caches the instance image, kernel and
- ramdisk if needed (it is needed in case of netboot for example).
+#. The ironic node's deploy interface caches the instance image (normal
+ deployment), kernel and ramdisk (``ramdisk`` deploy) or ISO (``ramdisk``
+ deploy with virtual media).
#. The ironic node's power interface instructs the node to power on.
diff --git a/doc/source/user/deploy.rst b/doc/source/user/deploy.rst
index 9b4609289..7499786cb 100644
--- a/doc/source/user/deploy.rst
+++ b/doc/source/user/deploy.rst
@@ -186,14 +186,6 @@ Capabilities
previously ironic used a separate ``instance_info/deploy_boot_mode``
field instead.
-* To override the :ref:`boot option <local-boot-partition-images>` used for
- this instance, set the ``boot_option`` capability:
-
- .. code-block:: shell
-
- baremetal node set $NODE_UUID \
- --instance-info capabilities='{"boot_option": "local"}'
-
* Starting with the Ussuri release, you can set :ref:`root device hints
<root-device-hints>` per instance:
diff --git a/driver-requirements.txt b/driver-requirements.txt
index 5333dbd4f..876e817cb 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -4,7 +4,7 @@
# python projects they should package as optional dependencies for Ironic.
# These are available on pypi
-proliantutils>=2.13.0
+proliantutils>=2.14.0
pysnmp>=4.3.0,<5.0.0
python-scciclient>=0.12.2
python-dracclient>=5.1.0,<9.0.0
@@ -17,4 +17,4 @@ ansible>=2.7
python-ibmcclient>=0.2.2,<0.3.0
# Dell EMC iDRAC sushy OEM extension
-sushy-oem-idrac>=4.0.0,<5.0.0
+sushy-oem-idrac>=5.0.0,<6.0.0
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 2ef369e13..59b166db4 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -1172,8 +1172,16 @@ def _get_chassis_uuid(node):
"""
if not node.chassis_id:
return
- chassis = objects.Chassis.get_by_id(api.request.context, node.chassis_id)
- return chassis.uuid
+ try:
+ chassis = objects.Chassis.get_by_id(api.request.context,
+ node.chassis_id)
+ return chassis.uuid
+ except exception.ChassisNotFound:
+ # NOTE(TheJulia): This is a case where multiple threads are racing
+ # and the chassis was not found... or somebody edited the database
+ # directly. Regardless, operationally, there is no chassis, and we
+ # return as there is nothing to actually return to the API consumer.
+ return
def _replace_chassis_uuid_with_id(node_dict):
@@ -2454,7 +2462,15 @@ class NodesController(rest.RestController):
raise exception.OperationNotPermitted()
context = api.request.context
- api_utils.check_policy('baremetal:node:create')
+ owned_node = False
+ if CONF.api.project_admin_can_manage_own_nodes:
+ owned_node = api_utils.check_policy_true(
+ 'baremetal:node:create:self_owned_node')
+ else:
+ owned_node = False
+
+ if not owned_node:
+ api_utils.check_policy('baremetal:node:create')
reject_fields_in_newer_versions(node)
@@ -2478,6 +2494,28 @@ class NodesController(rest.RestController):
if not node.get('resource_class'):
node['resource_class'] = CONF.default_resource_class
+ cdict = context.to_policy_values()
+ if cdict.get('system_scope') != 'all' and owned_node:
+ # This only applies when the request is not system
+ # scoped.
+
+ # First identify what was requested, and if there is
+ # a project ID to use.
+ project_id = None
+ requested_owner = node.get('owner', None)
+ if cdict.get('project_id', False):
+ project_id = cdict.get('project_id')
+
+ if requested_owner and requested_owner != project_id:
+ # Translation: If project scoped, and an owner has been
+ # requested, and that owner does not match the requestor's
+ # project ID value.
+ msg = _("Cannot create a node as a project scoped admin "
+ "with an owner other than your own project.")
+ raise exception.Invalid(msg)
+ # Finally, note the project ID
+ node['owner'] = project_id
+
chassis = _replace_chassis_uuid_with_id(node)
chassis_uuid = chassis and chassis.uuid or None
@@ -2731,8 +2769,16 @@ class NodesController(rest.RestController):
raise exception.OperationNotPermitted()
context = api.request.context
- rpc_node = api_utils.check_node_policy_and_retrieve(
- 'baremetal:node:delete', node_ident, with_suffix=True)
+ try:
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:delete', node_ident, with_suffix=True)
+ except exception.HTTPForbidden:
+ if not CONF.api.project_admin_can_manage_own_nodes:
+ raise
+ else:
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:delete:self_owned_node', node_ident,
+ with_suffix=True)
chassis_uuid = _get_chassis_uuid(rpc_node)
notify.emit_start_notification(context, rpc_node, 'delete',
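The project-scoped create path above reduces to a small ownership decision. A minimal sketch of that decision, assuming ``[api]project_admin_can_manage_own_nodes`` is enabled and using ``ValueError`` as a stand-in for ``exception.Invalid``:

.. code-block:: python

    def resolve_owner(cdict, node, owned_node):
        """Mirror the project-scoped create logic: pin the owner to the
        requestor's project unless the request is system scoped."""
        if cdict.get('system_scope') == 'all' or not owned_node:
            # System scope (or the self-owned policy did not match):
            # the requested owner is taken as-is.
            return node.get('owner')
        project_id = cdict.get('project_id') or None
        requested_owner = node.get('owner')
        if requested_owner and requested_owner != project_id:
            raise ValueError('Cannot create a node as a project scoped admin '
                             'with an owner other than your own project.')
        return project_id

    # A project admin in project "abc123" creating a node without an owner:
    print(resolve_owner({'project_id': 'abc123'}, {'name': 'node-0'}, True))
    # -> abc123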
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 04525ff65..8de2d156d 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -86,11 +86,13 @@ STANDARD_TRAITS = os_traits.get_traits()
CUSTOM_TRAIT_PATTERN = "^%s[A-Z0-9_]+$" % os_traits.CUSTOM_NAMESPACE
CUSTOM_TRAIT_REGEX = re.compile(CUSTOM_TRAIT_PATTERN)
-TRAITS_SCHEMA = {'anyOf': [
- {'type': 'string', 'minLength': 1, 'maxLength': 255,
- 'pattern': CUSTOM_TRAIT_PATTERN},
- {'type': 'string', 'enum': STANDARD_TRAITS},
-]}
+TRAITS_SCHEMA = {
+ 'type': 'string', 'minLength': 1, 'maxLength': 255,
+ 'anyOf': [
+ {'pattern': CUSTOM_TRAIT_PATTERN},
+ {'enum': STANDARD_TRAITS},
+ ]
+}
LOCAL_LINK_BASE_SCHEMA = {
'type': 'object',
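A quick illustration of why the restructured ``TRAITS_SCHEMA`` above yields friendlier validation errors; the pattern and enum values below are stand-ins for the ``os_traits`` constants:

.. code-block:: python

    import jsonschema

    # Same shape as the new schema: type/length checks live at the top level,
    # so a non-string value fails one clear check instead of every anyOf branch.
    traits_schema = {
        'type': 'string', 'minLength': 1, 'maxLength': 255,
        'anyOf': [
            {'pattern': '^CUSTOM_[A-Z0-9_]+$'},
            {'enum': ['HW_CPU_X86_VMX']},  # stand-in for os_traits.get_traits()
        ]
    }

    jsonschema.validate('CUSTOM_GOLD', traits_schema)  # passes
    try:
        jsonschema.validate(42, traits_schema)
    except jsonschema.exceptions.ValidationError as e:
        print(e.message)  # 42 is not of type 'string'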
diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py
index 7fc80bc97..763d92389 100644
--- a/ironic/api/controllers/v1/versions.py
+++ b/ironic/api/controllers/v1/versions.py
@@ -117,7 +117,7 @@ BASE_VERSION = 1
# v1.77: Add fields selector to drivers list and driver detail.
# v1.78: Add node history endpoint
# v1.79: Change allocation behaviour to prefer node name match
-
+# v1.80: Marker to represent self service node creation/deletion
MINOR_0_JUNO = 0
MINOR_1_INITIAL_VERSION = 1
MINOR_2_AVAILABLE_STATE = 2
@@ -198,6 +198,7 @@ MINOR_76_NODE_CHANGE_BOOT_MODE = 76
MINOR_77_DRIVER_FIELDS_SELECTOR = 77
MINOR_78_NODE_HISTORY = 78
MINOR_79_ALLOCATION_NODE_NAME = 79
+MINOR_80_PROJECT_CREATE_DELETE_NODE = 80
# When adding another version, update:
# - MINOR_MAX_VERSION
@@ -205,7 +206,7 @@ MINOR_79_ALLOCATION_NODE_NAME = 79
# explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']
-MINOR_MAX_VERSION = MINOR_79_ALLOCATION_NODE_NAME
+MINOR_MAX_VERSION = MINOR_80_PROJECT_CREATE_DELETE_NODE
# String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
diff --git a/ironic/common/args.py b/ironic/common/args.py
index 94cfe8841..bd13e3eaf 100755
--- a/ironic/common/args.py
+++ b/ironic/common/args.py
@@ -211,12 +211,17 @@ def _validate_schema(name, value, schema):
try:
jsonschema.validate(value, schema)
except jsonschema.exceptions.ValidationError as e:
-
- # The error message includes the whole schema which can be very
- # large and unhelpful, so truncate it to be brief and useful
- error_msg = ' '.join(str(e).split("\n")[:3])[:-1]
- raise exception.InvalidParameterValue(
- _('Schema error for %s: %s') % (name, error_msg))
+ error_msg = _('Schema error for %s: %s') % (name, e.message)
+ # Sometimes the root message is too generic, try to find a possible
+ # root cause:
+ cause = None
+ current = e
+ while current.context:
+ current = jsonschema.exceptions.best_match(current.context)
+ cause = current.message
+ if cause is not None:
+ error_msg += _('. Possible root cause: %s') % cause
+ raise exception.InvalidParameterValue(error_msg)
return value
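The ``best_match`` walk above can be exercised on its own; a minimal sketch with a made-up ``anyOf`` schema (not one of Ironic's real schemas):

.. code-block:: python

    import jsonschema

    schema = {'anyOf': [{'type': 'string', 'pattern': '^CUSTOM_'},
                        {'type': 'string', 'enum': ['foo']}]}
    try:
        jsonschema.validate('bar', schema)
    except jsonschema.exceptions.ValidationError as e:
        # e.message is the generic "'bar' is not valid under any of the given
        # schemas"; the per-branch errors live in e.context.
        cause = None
        current = e
        while current.context:
            current = jsonschema.exceptions.best_match(current.context)
            cause = current.message
        print(cause)  # e.g. "'bar' does not match '^CUSTOM_'"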
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index ddbce6f47..38316d2e7 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -851,3 +851,13 @@ class ImageRefIsARedirect(IronicException):
message=msg,
image_ref=image_ref,
redirect_url=redirect_url)
+
+
+class ConcurrentActionLimit(IronicException):
+ # NOTE(TheJulia): We explicitly don't report the concurrent
+ # action limit configuration value as a security guard since
+ # if informed of the limit, an attacker can tailor their attack.
+ _msg_fmt = _("Unable to process request at this time. "
+ "The concurrent action limit for %(task_type)s "
+ "has been reached. Please contact your administrator "
+ "and try again later.")
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index a56257e0f..7fdd398f9 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -437,12 +437,20 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:create',
check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ scope_types=['system', 'project'],
description='Create Node records',
operations=[{'path': '/nodes', 'method': 'POST'}],
deprecated_rule=deprecated_node_create
),
policy.DocumentedRuleDefault(
+ name='baremetal:node:create:self_owned_node',
+ check_str=('role:admin'),
+ scope_types=['project'],
+ description='Create node records which will be tracked '
+ 'as owned by the associated user project.',
+ operations=[{'path': '/nodes', 'method': 'POST'}],
+ ),
+ policy.DocumentedRuleDefault(
name='baremetal:node:list',
check_str=API_READER,
scope_types=['system', 'project'],
@@ -663,7 +671,14 @@ node_policies = [
operations=[{'path': '/nodes/{node_ident}', 'method': 'DELETE'}],
deprecated_rule=deprecated_node_delete
),
-
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:delete:self_owned_node',
+ check_str=PROJECT_ADMIN,
+ scope_types=['project'],
+ description='Delete node records which are associated with '
+ 'the requesting project.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'DELETE'}],
+ ),
policy.DocumentedRuleDefault(
name='baremetal:node:validate',
check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index ad51bac97..489f52737 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -675,41 +675,52 @@ def get_instance_image_info(task, ipxe_enabled=False):
os.path.join(root_dir, node.uuid, 'boot_iso'))
return image_info
-
image_properties = None
d_info = deploy_utils.get_image_instance_info(node)
+ isap = node.driver_internal_info.get('is_source_a_path')
def _get_image_properties():
- nonlocal image_properties
- if not image_properties:
- glance_service = service.GlanceImageService(context=ctx)
- image_properties = glance_service.show(
+ nonlocal image_properties, isap
+ if not image_properties and not isap:
+ i_service = service.get_image_service(
+ d_info['image_source'],
+ context=ctx)
+ image_properties = i_service.show(
d_info['image_source'])['properties']
+ # TODO(TheJulia): At some point, we should teach this code
+ # to understand that with a path, it *can* retrieve the
+ # manifest from the HTTP(S) endpoint, which can populate
+ # image_properties, and drive the same variable population
+ # as is done with Glance.
labels = ('kernel', 'ramdisk')
+ if not isap:
+ anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
+ else:
+ # When a path is used, a stage2 ramdisk can be determined
+ # automatically by anaconda, so it is not an explicit
+ # requirement.
+ anaconda_labels = ('ks_template', 'ks_cfg')
+
if not (i_info.get('kernel') and i_info.get('ramdisk')):
# NOTE(rloo): If both are not specified in instance_info
# we won't use any of them. We'll use the values specified
# with the image, which we assume have been set.
_get_image_properties()
- for label in labels:
- i_info[label] = str(image_properties[label + '_id'])
- node.instance_info = i_info
- node.save()
-
- anaconda_labels = ()
- if deploy_utils.get_boot_option(node) == 'kickstart':
- isap = node.driver_internal_info.get('is_source_a_path')
+ if image_properties:
+ # This is intended for Glance usage, but all image properties
+ # should be routed through the image service request routing.
+ for label in labels:
+ i_info[label] = str(image_properties[label + '_id'])
+ node.instance_info = i_info
+ node.save()
+ # TODO(TheJulia): Add functionality to look/grab the hints file
+ # for anaconda and just run with the entire path.
+
# stage2: installer stage2 squashfs image
# ks_template: anaconda kickstart template
# ks_cfg - rendered ks_template
- if not isap:
- anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
- else:
- # When a path is used, a stage2 ramdisk can be determiend
- # automatically by anaconda, so it is not an explicit
- # requirement.
- anaconda_labels = ('ks_template', 'ks_cfg')
+
# NOTE(rloo): We save stage2 & ks_template values in case they
# are changed by the user after we start using them and to
# prevent re-computing them again.
@@ -729,26 +740,31 @@ def get_instance_image_info(task, ipxe_enabled=False):
else:
node.set_driver_internal_info(
'stage2', str(image_properties['stage2_id']))
- # NOTE(TheJulia): A kickstart template is entirely independent
- # of the stage2 ramdisk. In the end, it was the configuration which
- # told anaconda how to execute.
- if i_info.get('ks_template'):
- # If the value is set, we always overwrite it, in the event
- # a rebuild is occuring or something along those lines.
- node.set_driver_internal_info('ks_template',
- i_info['ks_template'])
+ # NOTE(TheJulia): A kickstart template is entirely independent
+ # of the stage2 ramdisk. In the end, it was the configuration which
+ # told anaconda how to execute.
+ if i_info.get('ks_template'):
+ # If the value is set, we always overwrite it, in the event
+ # a rebuild is occurring or something along those lines.
+ node.set_driver_internal_info('ks_template',
+ i_info['ks_template'])
+ else:
+ _get_image_properties()
+ # ks_template is an optional property on the image
+ if image_properties and 'ks_template' in image_properties:
+ node.set_driver_internal_info(
+ 'ks_template', str(image_properties['ks_template']))
else:
- _get_image_properties()
- # ks_template is an optional property on the image
- if 'ks_template' not in image_properties:
- # If not defined, default to the overall system default
- # kickstart template, as opposed to a user supplied
- # template.
- node.set_driver_internal_info(
- 'ks_template', CONF.anaconda.default_ks_template)
- else:
- node.set_driver_internal_info(
- 'ks_template', str(image_properties['ks_template']))
+ # If not defined, default to the overall system default
+ # kickstart template, as opposed to a user supplied
+ # template.
+ node.set_driver_internal_info(
+ 'ks_template',
+ 'file://' + os.path.abspath(
+ CONF.anaconda.default_ks_template
+ )
+ )
+
node.save()
for label in labels + anaconda_labels:
@@ -997,8 +1013,12 @@ def build_kickstart_config_options(task):
manager_utils.add_secret_token(node, pregenerated=True)
node.save()
params['liveimg_url'] = node.instance_info['image_url']
+ if node.driver_internal_info.get('is_source_a_path', False):
+ # Record a value so it matches as the template opts in.
+ params['is_source_a_path'] = 'true'
+ if CONF.anaconda.insecure_heartbeat:
+ params['insecure_heartbeat'] = 'true'
params['agent_token'] = node.driver_internal_info['agent_secret_token']
-
heartbeat_url = '%s/v1/heartbeat/%s' % (
deploy_utils.get_ironic_api_url().rstrip('/'),
node.uuid
@@ -1245,6 +1265,8 @@ def cache_ramdisk_kernel(task, pxe_info, ipxe_enabled=False):
CONF.deploy.http_root,
'stage2')
ensure_tree(os.path.dirname(file_path))
+
+ if 'ks_cfg' in pxe_info:
# ks_cfg is rendered later by the driver using ks_template. It cannot
# be fetched and cached.
t_pxe_info.pop('ks_cfg')
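For reference, a hedged sketch of the kickstart template parameters assembled by ``build_kickstart_config_options`` when the image source is a plain path and ``[anaconda]insecure_heartbeat`` is set; every value below is an illustrative placeholder:

.. code-block:: python

    # Illustrative only: these keys are consumed by the rendered ks.cfg template.
    params = {
        'liveimg_url': 'http://images.example.com/rhel/',   # path-style source
        'is_source_a_path': 'true',    # mirrors driver_internal_info
        'insecure_heartbeat': 'true',  # only when [anaconda]insecure_heartbeat=True
        'agent_token': '<generated secret token>',
        'heartbeat_url': 'http://ironic-api.example.com:6385/v1/heartbeat/<node-uuid>',
    }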
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index aaf423bfe..940321870 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -470,7 +470,7 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
- 'master': {
+ '21.0': {
'api': '1.79',
'rpc': '1.55',
'objects': {
@@ -490,6 +490,26 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
+ 'master': {
+ 'api': '1.80',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.36'],
+ 'NodeHistory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.10'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
}
# NOTE(xek): Assign each named release to the appropriate semver.
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
index e3151d4b8..53d66ddd8 100644
--- a/ironic/conductor/cleaning.py
+++ b/ironic/conductor/cleaning.py
@@ -69,7 +69,7 @@ def do_node_clean(task, clean_steps=None, disable_ramdisk=False):
task.driver.power.validate(task)
if not disable_ramdisk:
task.driver.network.validate(task)
- except exception.InvalidParameterValue as e:
+ except (exception.InvalidParameterValue, exception.NetworkError) as e:
msg = (_('Validation of node %(node)s for cleaning failed: %(msg)s') %
{'node': node.uuid, 'msg': e})
return utils.cleaning_error_handler(task, msg)
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 13d11d1d9..7e98459ff 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -886,7 +886,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.NodeInMaintenance,
exception.InstanceDeployFailure,
exception.InvalidStateRequested,
- exception.NodeProtected)
+ exception.NodeProtected,
+ exception.ConcurrentActionLimit)
def do_node_deploy(self, context, node_id, rebuild=False,
configdrive=None, deploy_steps=None):
"""RPC method to initiate deployment to a node.
@@ -910,8 +911,11 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: InvalidStateRequested when the requested state is not a valid
target from the current state.
:raises: NodeProtected if the node is protected.
+ :raises: ConcurrentActionLimit if this action would exceed the maximum
+ number of configured concurrent actions of this type.
"""
LOG.debug("RPC do_node_deploy called for node %s.", node_id)
+ self._concurrent_action_limit(action='provisioning')
event = 'rebuild' if rebuild else 'deploy'
# NOTE(comstud): If the _sync_power_states() periodic task happens
@@ -983,7 +987,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.NodeLocked,
exception.InstanceDeployFailure,
exception.InvalidStateRequested,
- exception.NodeProtected)
+ exception.NodeProtected,
+ exception.ConcurrentActionLimit)
def do_node_tear_down(self, context, node_id):
"""RPC method to tear down an existing node deployment.
@@ -998,8 +1003,11 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: InvalidStateRequested when the requested state is not a valid
target from the current state.
:raises: NodeProtected if the node is protected.
+ :raises: ConcurrentActionLimit if this action would exceed the maximum
+ number of configured concurrent actions of this type.
"""
LOG.debug("RPC do_node_tear_down called for node %s.", node_id)
+ self._concurrent_action_limit(action='unprovisioning')
with task_manager.acquire(context, node_id, shared=False,
purpose='node tear down') as task:
@@ -1121,7 +1129,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.InvalidStateRequested,
exception.NodeInMaintenance,
exception.NodeLocked,
- exception.NoFreeConductorWorker)
+ exception.NoFreeConductorWorker,
+ exception.ConcurrentActionLimit)
def do_node_clean(self, context, node_id, clean_steps,
disable_ramdisk=False):
"""RPC method to initiate manual cleaning.
@@ -1150,7 +1159,10 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: NodeLocked if node is locked by another conductor.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
+ :raises: ConcurrentActionLimit if this action would exceed the
+ configured limits of the deployment.
"""
+ self._concurrent_action_limit(action='cleaning')
with task_manager.acquire(context, node_id, shared=False,
purpose='node manual cleaning') as task:
node = task.node
@@ -3549,6 +3561,40 @@ class ConductorManager(base_manager.BaseConductorManager):
# impact DB access if done in excess.
eventlet.sleep(0)
+ def _concurrent_action_limit(self, action):
+ """Check Concurrency limits and block operations if needed.
+
+ This method is used to serve as a central place for the logic
+ for checks on concurrency limits. If a limit is reached, then
+ an appropriate exception is raised.
+
+ :raises: ConcurrentActionLimit if the configured concurrency
+ limit is exceeded.
+ """
+ # NOTE(TheJulia): Keeping this all in one place for simplicity.
+ if action == 'provisioning':
+ node_count = self.dbapi.count_nodes_in_provision_state([
+ states.DEPLOYING,
+ states.DEPLOYWAIT
+ ])
+ if node_count >= CONF.conductor.max_concurrent_deploy:
+ raise exception.ConcurrentActionLimit(
+ task_type=action)
+
+ if action == 'unprovisioning' or action == 'cleaning':
+ # NOTE(TheJulia): This also checks for the deleting state
+ # which is super transitory, *but* you can get a node into
+ # the state. So in order to guard against a DoS attack, we
+ # need to check even the super transitory node state.
+ node_count = self.dbapi.count_nodes_in_provision_state([
+ states.DELETING,
+ states.CLEANING,
+ states.CLEANWAIT
+ ])
+ if node_count >= CONF.conductor.max_concurrent_clean:
+ raise exception.ConcurrentActionLimit(
+ task_type=action)
+
@METRICS.timer('get_vendor_passthru_metadata')
def get_vendor_passthru_metadata(route_dict):
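The guard above is a count-and-compare against the new ``[conductor]max_concurrent_deploy`` and ``max_concurrent_clean`` options. A minimal standalone sketch (the state strings mirror ``ironic.common.states``; ``RuntimeError`` stands in for ``ConcurrentActionLimit``):

.. code-block:: python

    DEPLOY_STATES = ['deploying', 'wait call-back']
    CLEAN_STATES = ['deleting', 'cleaning', 'clean wait']

    def check_limit(action, count_nodes_in_provision_state, conf):
        """Reject the request if too many nodes are already in flight."""
        if action == 'provisioning':
            watched, limit = DEPLOY_STATES, conf['max_concurrent_deploy']
        else:  # 'unprovisioning' or 'cleaning'
            watched, limit = CLEAN_STATES, conf['max_concurrent_clean']
        if count_nodes_in_provision_state(watched) >= limit:
            raise RuntimeError('concurrent action limit for %s reached' % action)

    # 250 nodes already deploying against the default limit of 250 -> rejected.
    try:
        check_limit('provisioning', lambda states: 250,
                    {'max_concurrent_deploy': 250, 'max_concurrent_clean': 50})
    except RuntimeError as exc:
        print(exc)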
diff --git a/ironic/conductor/verify.py b/ironic/conductor/verify.py
index 50180e2b8..812472b83 100644
--- a/ironic/conductor/verify.py
+++ b/ironic/conductor/verify.py
@@ -63,7 +63,7 @@ def do_node_verify(task):
except Exception as e:
error = ('Node %(node)s failed verify step %(step)s '
'with unexpected error: %(err)s' %
- {'node': node.uuid, 'step': node.verify_step,
+ {'node': node.uuid, 'step': step['step'],
'err': e})
utils.verifying_error_handler(
task, error,
diff --git a/ironic/conf/anaconda.py b/ironic/conf/anaconda.py
index 8ae3ab533..4f230ecdc 100644
--- a/ironic/conf/anaconda.py
+++ b/ironic/conf/anaconda.py
@@ -28,6 +28,17 @@ opts = [
help=_('kickstart template to use when no kickstart template '
'is specified in the instance_info or the glance OS '
'image.')),
+ cfg.BoolOpt('insecure_heartbeat',
+ default=False,
+ mutable=True,
+ help=_('Option to allow the kickstart configuration to be '
+ 'informed if SSL/TLS certificate verificaiton should '
+ 'be enforced, or not. This option exists largely to '
+ 'facilitate easy testing and use of the ``anaconda`` '
+ 'deployment interface. When this option is set, '
+ 'heartbeat operations, depending on the contents of '
+ 'the utilized kickstart template, may not enforce TLS '
+ 'certificate verification.')),
]
diff --git a/ironic/conf/api.py b/ironic/conf/api.py
index 2b0e9a824..cf59fa006 100644
--- a/ironic/conf/api.py
+++ b/ironic/conf/api.py
@@ -86,6 +86,11 @@ opts = [
'network_data_schema',
default='$pybasedir/api/controllers/v1/network-data-schema.json',
help=_("Schema for network data used by this deployment.")),
+ cfg.BoolOpt('project_admin_can_manage_own_nodes',
+ default=True,
+ mutable=True,
+ help=_('If a project scoped administrative user is permitted '
+ 'to create/delete baremetal nodes in their project.')),
]
opt_group = cfg.OptGroup(name='api',
diff --git a/ironic/conf/conductor.py b/ironic/conf/conductor.py
index b1d6bae4f..2161b9434 100644
--- a/ironic/conf/conductor.py
+++ b/ironic/conf/conductor.py
@@ -358,6 +358,32 @@ opts = [
'model. The conductor does *not* record this value '
'otherwise, and this information is not backfilled '
'for prior instances which have been deployed.')),
+ cfg.IntOpt('max_concurrent_deploy',
+ default=250,
+ min=1,
+ mutable=True,
+ help=_('The maximum number of concurrent nodes in deployment '
+ 'which are permitted in this Ironic system. '
+ 'If this limit is reached, new requests will be '
+ 'rejected until the number of deployments in progress '
+ 'is lower than this maximum. As this is a security '
+ 'mechanism requests are not queued, and this setting '
+ 'is a global setting applying to all requests this '
+ 'conductor receives, regardless of access rights. '
+ 'The concurrent deployment limit cannot be disabled.')),
+ cfg.IntOpt('max_concurrent_clean',
+ default=50,
+ min=1,
+ mutable=True,
+ help=_('The maximum number of concurrent nodes in cleaning '
+ 'which are permitted in this Ironic system. '
+ 'If this limit is reached, new requests will be '
+ 'rejected until the number of nodes in cleaning '
+ 'is lower than this maximum. As this is a security '
+ 'mechanism, requests are not queued, and this setting '
+ 'is a global setting applying to all requests this '
+ 'conductor receives, regardless of access rights. '
+ 'The concurrent clean limit cannot be disabled.')),
]
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index 66555d146..0e3c32bd1 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -111,7 +111,7 @@ driver_opts = [
cfg.StrOpt('default_console_interface',
help=_DEFAULT_IFACE_HELP.format('console')),
cfg.ListOpt('enabled_deploy_interfaces',
- default=['direct'],
+ default=['direct', 'ramdisk'],
help=_ENABLED_IFACE_HELP.format('deploy')),
cfg.StrOpt('default_deploy_interface',
help=_DEFAULT_IFACE_HELP.format('deploy')),
diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py
index 7a7fb37d7..ff2020105 100644
--- a/ironic/conf/deploy.py
+++ b/ironic/conf/deploy.py
@@ -108,7 +108,7 @@ opts = [
'state. If True, shred will be invoked and cleaning '
'will continue.')),
cfg.IntOpt('disk_erasure_concurrency',
- default=1,
+ default=4,
min=1,
mutable=True,
help=_('Defines the target pool size used by Ironic Python '
@@ -120,18 +120,6 @@ opts = [
mutable=True,
help=_('Whether to power off a node after deploy failure. '
'Defaults to True.')),
- cfg.StrOpt('default_boot_option',
- choices=[('netboot', _('boot from a network')),
- ('local', _('local boot'))],
- default='local',
- mutable=True,
- help=_('Default boot option to use when no boot option is '
- 'requested in node\'s driver_info. Defaults to '
- '"local". Prior to the Ussuri release, the default '
- 'was "netboot".'),
- deprecated_for_removal=True,
- deprecated_reason=_('Support for network boot will be removed '
- 'after the Yoga release.')),
cfg.StrOpt('default_boot_mode',
choices=[(boot_modes.UEFI, _('UEFI boot mode')),
(boot_modes.LEGACY_BIOS, _('Legacy BIOS boot mode'))],
@@ -145,9 +133,7 @@ opts = [
'to set an explicit value for this option, and if the '
'setting or default differs from nodes, to ensure that '
'nodes are configured specifically for their desired '
- 'boot mode. This option '
- 'only has effect when management interface supports '
- 'boot mode management') % {
+ 'boot mode.') % {
'bios': boot_modes.LEGACY_BIOS,
'uefi': boot_modes.UEFI}),
cfg.BoolOpt('configdrive_use_object_store',
diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py
index 364c64c81..197378ce7 100644
--- a/ironic/conf/ilo.py
+++ b/ironic/conf/ilo.py
@@ -120,6 +120,11 @@ opts = [
'/proc/cmdline. Mind severe cmdline size limit! Can be '
'overridden by `instance_info/kernel_append_params` '
'property.')),
+ cfg.StrOpt('cert_path',
+ default='/var/lib/ironic/ilo/',
+ help=_('On the ironic-conductor node, directory where the ilo '
+ 'driver stores the CSR and the cert.')),
+
]
diff --git a/ironic/conf/molds.py b/ironic/conf/molds.py
index 4cec1749a..53724598e 100644
--- a/ironic/conf/molds.py
+++ b/ironic/conf/molds.py
@@ -26,11 +26,11 @@ opts = [
cfg.StrOpt('password',
help=_('Password for "http" Basic auth. By default set '
'empty.')),
- cfg.StrOpt('retry_attempts',
+ cfg.IntOpt('retry_attempts',
default=3,
help=_('Retry attempts for saving or getting configuration '
'molds.')),
- cfg.StrOpt('retry_interval',
+ cfg.IntOpt('retry_interval',
default=3,
help=_('Retry interval for saving or getting configuration '
'molds.'))
diff --git a/ironic/db/api.py b/ironic/db/api.py
index 712919bb3..45e3fe2ca 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -1416,3 +1416,12 @@ class Connection(object, metaclass=abc.ABCMeta):
:param entires: A list of node history entriy id's to be
queried for deletion.
"""
+
+ @abc.abstractmethod
+ def count_nodes_in_provision_state(self, state):
+ """Count the number of nodes in given provision state.
+
+ :param state: A provision_state value to match for the
+ count operation. This can be a single provision
+ state value or a list of values.
+ """
diff --git a/ironic/db/sqlalchemy/__init__.py b/ironic/db/sqlalchemy/__init__.py
index 0f792361a..88ac079d0 100644
--- a/ironic/db/sqlalchemy/__init__.py
+++ b/ironic/db/sqlalchemy/__init__.py
@@ -13,4 +13,6 @@
from oslo_db.sqlalchemy import enginefacade
# NOTE(dtantsur): we want sqlite as close to a real database as possible.
-enginefacade.configure(sqlite_fk=True)
+# FIXME(stephenfin): we need to remove reliance on autocommit semantics ASAP
+# since it's not compatible with SQLAlchemy 2.0
+enginefacade.configure(sqlite_fk=True, __autocommit=True)
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 05d5cc45e..c14719af8 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -30,6 +30,7 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import sqlalchemy as osp_sqlalchemy
import sqlalchemy as sa
+from sqlalchemy import or_
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
@@ -2400,3 +2401,26 @@ class Connection(api.Connection):
).filter(
models.NodeHistory.id.in_(entries)
).delete(synchronize_session=False)
+
+ def count_nodes_in_provision_state(self, state):
+ if not isinstance(state, list):
+ state = [state]
+ with _session_for_read() as session:
+ # Intentionally does not use the full ORM model
+ # because that is de-duped by pkey, but we already
+ # have unique constraints on UUID/name, so... shouldn't
+ # be a big deal. #JuliaFamousLastWords.
+ # Anyway, intent here is to be as quick as possible and
+ # literally have the DB do *all* of the work, so no
+ # client side ops occur. The column is also indexed,
+ # which means this will be an index based response.
+ # TODO(TheJulia): This might need to be revised for
+ # SQLAlchemy 2.0 as it should be a scalar select and count
+ # instead.
+ return session.query(
+ models.Node.provision_state
+ ).filter(
+ or_(
+ models.Node.provision_state == v for v in state
+ )
+ ).count()
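A usage sketch for the new DB API call (assuming the usual ``dbapi.get_instance()`` accessor used elsewhere in the conductor):

.. code-block:: python

    from ironic.common import states
    from ironic.db import api as dbapi

    db = dbapi.get_instance()
    # Returns a plain integer; a list of states is OR'ed together server-side.
    in_flight = db.count_nodes_in_provision_state(
        [states.DEPLOYING, states.DEPLOYWAIT])
    print('nodes currently deploying: %d' % in_flight)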
diff --git a/ironic/drivers/ilo.py b/ironic/drivers/ilo.py
index 10676b411..b6e189ee9 100644
--- a/ironic/drivers/ilo.py
+++ b/ironic/drivers/ilo.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -37,7 +38,7 @@ class IloHardware(generic.GenericHardware):
@property
def supported_boot_interfaces(self):
"""List of supported boot interfaces."""
- return [boot.IloVirtualMediaBoot, boot.IloPXEBoot, boot.IloiPXEBoot]
+ return [boot.IloVirtualMediaBoot, boot.IloiPXEBoot, boot.IloPXEBoot]
@property
def supported_bios_interfaces(self):
@@ -67,7 +68,7 @@ class IloHardware(generic.GenericHardware):
@property
def supported_vendor_interfaces(self):
- """List of supported power interfaces."""
+ """List of supported vendor interfaces."""
return [vendor.VendorPassthru, noop.NoVendor]
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index c171f81b1..1c0f5465e 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -16,7 +16,6 @@ from urllib import parse as urlparse
from ironic_lib import metrics_utils
from oslo_log import log
-from oslo_utils import excutils
from oslo_utils import units
from ironic.common import exception
@@ -325,33 +324,7 @@ class CustomAgentDeploy(agent_base.AgentBaseMixin, agent_base.AgentDeployMixin,
if node.provision_state == states.DEPLOYING:
# Validate network interface to ensure that it supports boot
# options configured on the node.
- try:
- task.driver.network.validate(task)
- except exception.InvalidParameterValue:
- # For 'neutron' network interface validation will fail
- # if node is using 'netboot' boot option while provisioning
- # a whole disk image. Updating 'boot_option' in node's
- # 'instance_info' to 'local for backward compatibility.
- # TODO(stendulker): Fail here once the default boot
- # option is local.
- # NOTE(TheJulia): Fixing the default boot mode only
- # masks the failure as the lack of a user definition
- # can be perceived as both an invalid configuration and
- # reliance upon the default configuration. The reality
- # being that in most scenarios, users do not want network
- # booting, so the changed default should be valid.
- with excutils.save_and_reraise_exception(reraise=False) as ctx:
- instance_info = node.instance_info
- capabilities = utils.parse_instance_info_capabilities(node)
- if 'boot_option' not in capabilities:
- capabilities['boot_option'] = 'local'
- instance_info['capabilities'] = capabilities
- node.instance_info = instance_info
- node.save()
- # Re-validate the network interface
- task.driver.network.validate(task)
- else:
- ctx.reraise = True
+ task.driver.network.validate(task)
# Determine if this is a fast track sequence
fast_track_deploy = manager_utils.is_fast_track(task)
if fast_track_deploy:
@@ -597,13 +570,6 @@ class AgentDeploy(CustomAgentDeploy):
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
cpu_arch = task.node.properties.get('cpu_arch')
- # If `boot_option` is set to `netboot`, PXEBoot.prepare_instance()
- # would need root_uuid of the whole disk image to add it into the
- # pxe config to perform chain boot.
- # IPA would have returned us the 'root_uuid_or_disk_id' if image
- # being provisioned is a whole disk image. IPA would also provide us
- # 'efi_system_partition_uuid' if the image being provisioned is a
- # partition image.
# In case of local boot using partition image, we need both
# 'root_uuid_or_disk_id' and 'efi_system_partition_uuid' to configure
# bootloader for local boot.
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index 582c36d90..ff2a454ea 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -1217,12 +1217,12 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
"""
node = task.node
- if deploy_utils.get_boot_option(node) == "local":
- # Install the boot loader
- self.configure_local_boot(
- task, root_uuid=root_uuid,
- efi_system_part_uuid=efi_sys_uuid,
- prep_boot_part_uuid=prep_boot_part_uuid)
+ # Install the boot loader
+ self.configure_local_boot(
+ task, root_uuid=root_uuid,
+ efi_system_part_uuid=efi_sys_uuid,
+ prep_boot_part_uuid=prep_boot_part_uuid)
+
try:
task.driver.boot.prepare_instance(task)
except Exception as e:
diff --git a/ironic/drivers/modules/agent_config.template b/ironic/drivers/modules/agent_config.template
deleted file mode 100644
index bf9f5f4b4..000000000
--- a/ironic/drivers/modules/agent_config.template
+++ /dev/null
@@ -1,13 +0,0 @@
-default deploy
-
-label deploy
-kernel {{ pxe_options.deployment_aki_path }}
-append initrd={{ pxe_options.deployment_ari_path }} text {{ pxe_options.pxe_append_params }}
-
-label boot_partition
-kernel {{ pxe_options.aki_path }}
-append initrd={{ pxe_options.ari_path }} root={{ ROOT }} ro text {{ pxe_options.pxe_append_params|default("", true) }}
-
-label boot_whole_disk
-COM32 chain.c32
-append mbr:{{ DISK_IDENTIFIER }}
diff --git a/ironic/drivers/modules/ansible/deploy.py b/ironic/drivers/modules/ansible/deploy.py
index cd3f4c68f..d7cf49412 100644
--- a/ironic/drivers/modules/ansible/deploy.py
+++ b/ironic/drivers/modules/ansible/deploy.py
@@ -396,12 +396,6 @@ class AnsibleDeploy(agent_base.HeartbeatMixin,
task.driver.boot.validate(task)
node = task.node
- iwdi = node.driver_internal_info.get('is_whole_disk_image')
- if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
- raise exception.InvalidParameterValue(_(
- "Node %(node)s is configured to use the ansible deploy "
- "interface, which does not support netboot.") %
- {'node': node.uuid})
params = {}
image_source = node.instance_info.get('image_source')
diff --git a/ironic/drivers/modules/boot.ipxe b/ironic/drivers/modules/boot.ipxe
index 4ed58497c..95d95686a 100644
--- a/ironic/drivers/modules/boot.ipxe
+++ b/ironic/drivers/modules/boot.ipxe
@@ -10,6 +10,12 @@ isset ${net${netid}/mac} || goto loop_done
echo Attempting to boot from MAC ${net${netid}/mac:hexhyp}
chain {{ ipxe_for_mac_uri }}${net${netid}/mac:hexhyp} || goto loop
+# If we've got here the chained config returned success
+# suggesting "sanboot" in boot_whole_disk failed (some UEFI cases)
+# exit 0 so the bios continues to the next device
+echo Exiting pxe config to allow boot to continue on next device
+exit 0
+
:loop_done
{% if ipxe_fallback_script -%}
chain {{ ipxe_fallback_script }} | goto boot_failed
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index bcefd2323..13f91e9cd 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -55,7 +55,6 @@ LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
SUPPORTED_CAPABILITIES = {
- 'boot_option': ('local', 'netboot', 'ramdisk', 'kickstart'),
'boot_mode': ('bios', 'uefi'),
'secure_boot': ('true', 'false'),
'disk_label': ('msdos', 'gpt'),
@@ -159,6 +158,9 @@ def _replace_disk_identifier(path, disk_identifier):
# NOTE(TheJulia): This should likely be migrated to pxe_utils.
+# TODO(dtantsur): with the removal of netboot, root_uuid_or_disk_id and
+# the logic of replacing ROOT can be dropped, while is_whole_disk_image can
+# be renamed to something like netboot_fallback.
def switch_pxe_config(path, root_uuid_or_disk_id, boot_mode,
is_whole_disk_image, iscsi_boot=False,
ramdisk_boot=False, ipxe_enabled=False,
@@ -616,17 +618,11 @@ def get_boot_option(node):
:returns: A string representing the boot option type. Defaults to
configuration setting [deploy]default_boot_mode.
"""
-
- # NOTE(TheJulia): Software raid always implies local deployment
- if is_software_raid(node):
- return 'local'
if is_anaconda_deploy(node):
return 'kickstart'
if is_ramdisk_deploy(node):
return 'ramdisk'
- capabilities = utils.parse_instance_info_capabilities(node)
- return capabilities.get('boot_option',
- CONF.deploy.default_boot_option).lower()
+ return 'local'
# FIXME(dtantsur): relying on deploy interface name is an anti-pattern.
@@ -1462,7 +1458,12 @@ def reboot_to_finish_step(task):
disable_ramdisk = task.node.driver_internal_info.get(
'cleaning_disable_ramdisk')
if not disable_ramdisk:
+ if manager_utils.is_fast_track(task):
+ LOG.debug('Forcing power off on node %s for a clean reboot into '
+ 'the agent image', task.node)
+ manager_utils.node_power_action(task, states.POWER_OFF)
prepare_agent_boot(task)
+
manager_utils.node_power_action(task, states.REBOOT)
return get_async_step_return_state(task.node)
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index ae06f0dfa..8bad02bba 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -1327,6 +1327,8 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
"""Perform post delete_configuration action to commit the config.
Clears foreign configuration for all RAID controllers.
+ If there is no foreign configuration to clear, checks whether any
+ controllers can be converted to RAID mode.
:param task: a TaskManager instance containing the node to act on.
:param raid_configs: a list of dictionaries containing the RAID
@@ -1338,7 +1340,15 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
async_proc = DracRedfishRAID._clear_foreign_config(system, task)
if async_proc:
# Async processing with system rebooting in progress
+ task.node.set_driver_internal_info(
+ 'raid_config_substep', 'clear_foreign_config')
+ task.node.save()
return deploy_utils.get_async_step_return_state(task.node)
+ else:
+ conv_state = DracRedfishRAID._convert_controller_to_raid_mode(
+ task)
+ if conv_state:
+ return conv_state
return return_state
@@ -1486,6 +1496,69 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
task_mon.wait(CONF.drac.raid_job_timeout)
return False
+ @staticmethod
+ def _convert_controller_to_raid_mode(task):
+ """Convert eligible controllers to RAID mode if not already.
+
+ :param task: a TaskManager instance containing the node to act on
+ :returns: Return state if there are controllers to convert and
+ a reboot is in progress, otherwise None.
+ """
+
+ system = redfish_utils.get_system(task.node)
+ task_mons = []
+ warning_msg_templ = (
+ 'Possibly because `%(pkg)s` is too old. Without newer `%(pkg)s` '
+ 'PERC 9 and PERC 10 controllers that are not in RAID mode will '
+ 'not be used or have limited RAID support. To avoid that update '
+ '`%(pkg)s`')
+ for storage in system.storage.get_members():
+ storage_controllers = None
+ try:
+ storage_controllers = storage.controllers
+ except sushy.exceptions.MissingAttributeError:
+ # Check if there are storage_controllers to tell old iDRAC apart
+ # from storage without a controller
+ if storage.storage_controllers:
+ LOG.warning('%(storage)s does not have controllers for '
+ 'node %(node)s' + warning_msg_templ,
+ {'storage': storage.identity,
+ 'node': task.node.uuid,
+ 'pkg': 'iDRAC'})
+ continue
+ except AttributeError:
+ LOG.warning('%(storage)s does not have controllers attribute. '
+ + warning_msg_templ, {'storage': storage.identity,
+ 'pkg': 'sushy'})
+ return None
+ if storage_controllers:
+ controller = storage.controllers.get_members()[0]
+ try:
+ oem_controller = controller.get_oem_extension('Dell')
+ except sushy.exceptions.ExtensionError as ee:
+ LOG.warning('Failed to find extension to convert '
+ 'controller to RAID mode. '
+ + warning_msg_templ + '. Error: %(err)s',
+ {'err': ee, 'pkg': 'sushy-oem-idrac'})
+ return None
+ task_mon = oem_controller.convert_to_raid()
+ if task_mon:
+ task_mons.append(task_mon)
+
+ if task_mons:
+ deploy_utils.set_async_step_flags(
+ task.node,
+ reboot=True,
+ skip_current_step=True,
+ polling=True)
+
+ task.upgrade_lock()
+ task.node.set_driver_internal_info(
+ 'raid_task_monitor_uris',
+ [tm.task_monitor_uri for tm in task_mons])
+ task.node.save()
+ return deploy_utils.reboot_to_finish_step(task)
+
@METRICS.timer('DracRedfishRAID._query_raid_tasks_status')
@periodics.node_periodic(
purpose='checking async RAID tasks',
@@ -1545,6 +1618,15 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
else:
# all tasks completed and none of them failed
node.del_driver_internal_info('raid_task_monitor_uris')
+ substep = node.driver_internal_info.get(
+ 'raid_config_substep')
+ if substep == 'clear_foreign_config':
+ node.del_driver_internal_info('raid_config_substep')
+ node.save()
+ res = DracRedfishRAID._convert_controller_to_raid_mode(
+ task)
+ if res: # New tasks submitted
+ return
self._set_success(task)
node.save()
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index 7f5c5adcf..e29852981 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -320,7 +320,7 @@ class IloVirtualMediaBoot(base.BootInterface):
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error("Virtual media deploy with 'ramdisk' "
- "boot_option accepts only Glance images or "
+ "deploy accepts only Glance images or "
"HTTP(S) URLs as "
"instance_info['boot_iso']. Either %s "
"is not a valid HTTP(S) URL or is not "
@@ -460,21 +460,8 @@ class IloVirtualMediaBoot(base.BootInterface):
boot_devices.CDROM,
persistent=True)
else:
- # Boot from disk every time if the image deployed is
- # a whole disk image.
- node = task.node
- iwdi = node.driver_internal_info.get('is_whole_disk_image')
- if deploy_utils.get_boot_option(node) == "local" or iwdi:
- manager_utils.node_set_boot_device(task, boot_devices.DISK,
- persistent=True)
- else:
- drv_int_info = node.driver_internal_info
- root_uuid_or_disk_id = drv_int_info.get('root_uuid_or_disk_id')
- if root_uuid_or_disk_id:
- self._configure_vmedia_boot(task, root_uuid_or_disk_id)
- else:
- LOG.warning("The UUID for the root partition could not "
- "be found for node %s", node.uuid)
+ manager_utils.node_set_boot_device(task, boot_devices.DISK,
+ persistent=True)
# Set boot mode
ilo_common.update_boot_mode(task)
# Need to enable secure boot, if being requested
@@ -590,8 +577,7 @@ class IloPXEBoot(pxe.PXEBoot):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
- relevant information from the node's instance_info. In case of netboot,
- it updates the dhcp entries and switches the PXE config. In case of
+ relevant information from the node's instance_info. In case of
localboot, it cleans up the PXE config.
In case of 'boot from volume', it updates the iSCSI info onto iLO and
sets the node to boot from 'UefiTarget' boot device.
@@ -683,8 +669,7 @@ class IloiPXEBoot(ipxe.iPXEBoot):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
- relevant information from the node's instance_info. In case of netboot,
- it updates the dhcp entries and switches the PXE config. In case of
+ relevant information from the node's instance_info. In case of
localboot, it cleans up the PXE config.
In case of 'boot from volume', it updates the iSCSI info onto iLO and
sets the node to boot from 'UefiTarget' boot device.
@@ -904,7 +889,7 @@ class IloUefiHttpsBoot(base.BootInterface):
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error("UEFI-HTTPS boot with 'ramdisk' "
- "boot_option accepts only Glance images or "
+ "deploy accepts only Glance images or "
"HTTPS URLs as "
"instance_info['boot_iso']. Either %s "
"is not a valid HTTPS URL or is not "
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index 2b5b8c0db..13f975c67 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +32,7 @@ from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
+from ironic.common import image_service
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
@@ -494,6 +496,26 @@ def update_ipmi_properties(task):
task.node.driver_info = info
+def update_redfish_properties(task):
+ """Update redfish properties to node driver_info
+
+ This method updates the node's driver info with redfish driver driver_info.
+ :param task: a task from TaskManager.
+ """
+ node = task.node
+ info = node.driver_info
+
+ # updating redfish credentials
+ info['redfish_address'] = info.get('ilo_address')
+ info['redfish_username'] = info.get('ilo_username')
+ info['redfish_password'] = info.get('ilo_password')
+ info['redfish_verify_ca'] = info.get('ilo_verify_ca')
+ info['redfish_system_id'] = '/redfish/v1/Systems/1'
+
+ # saving redfish credentials to task object
+ task.node.driver_info = info
+
+
def _get_floppy_image_name(node):
"""Returns the floppy image name for a given node.
@@ -1126,3 +1148,23 @@ def setup_uefi_https(task, iso, persistent=False):
except ilo_error.IloError as ilo_exception:
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
+
+
+def download(target_file, file_url):
+ """Downloads file based on the scheme.
+
+ It downloads the file (url) to the given location.
+ The supported url schemes are file, http, and https.
+ :param target_file: target file for copying the downloaded file.
+ :param file_url: source file url from where file needs to be downloaded.
+ :raises: ImageDownloadFailed, on failure to download the file.
+ """
+ parsed_url = urlparse.urlparse(file_url)
+ if parsed_url.scheme == "file":
+ src_file = parsed_url.path
+ with open(target_file, 'wb') as fd:
+ image_service.FileImageService().download(src_file, fd)
+ elif parsed_url.scheme in ('http', 'https'):
+ src_file = parsed_url.geturl()
+ with open(target_file, 'wb') as fd:
+ image_service.HttpImageService().download(src_file, fd)
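A short usage sketch for the new helper; the paths and URL below are illustrative placeholders:

.. code-block:: python

    from ironic.drivers.modules.ilo import common as ilo_common

    # Copy a signed certificate referenced by URL onto the conductor;
    # file://, http:// and https:// sources are handled the same way.
    ilo_common.download('/var/lib/ironic/ilo/my-node/my-node.crt',
                        'https://fileserver.example.com/ilo-signed.crt')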
diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py
index c9a8259e6..5c4f03fb6 100644
--- a/ironic/drivers/modules/ilo/management.py
+++ b/ironic/drivers/modules/ilo/management.py
@@ -14,7 +14,8 @@
"""
iLO Management Interface
"""
-
+import os
+import shutil
from urllib import parse as urlparse
from ironic_lib import metrics_utils
@@ -79,6 +80,27 @@ _RESET_ILO_CREDENTIALS_ARGSINFO = {
}
}
+_CREATE_CSR_ARGSINFO = {
+ 'csr_params': {
+ 'description': (
+ "This arguments represents the information needed "
+ "to create the CSR certificate. The keys to be provided are "
+ "City, CommonName, OrgName, State."
+ ),
+ 'required': True
+ }
+}
+
+_ADD_HTTPS_CERT_ARGSINFO = {
+ 'cert_file': {
+ 'description': (
+ "This argument represents the path to the signed HTTPS "
+ "certificate which will be added to the iLO."
+ ),
+ 'required': True
+ }
+}
+
_SECURITY_PARAMETER_UPDATE_ARGSINFO = {
'security_parameters': {
'description': (
@@ -574,6 +596,61 @@ class IloManagement(base.ManagementInterface):
"parameter for node %(node)s is updated",
{'node': node.uuid})
+ @METRICS.timer('IloManagement.create_csr')
+ @base.clean_step(priority=0, abortable=False,
+ argsinfo=_CREATE_CSR_ARGSINFO)
+ def create_csr(self, task, **kwargs):
+ """Creates the CSR.
+
+ :param task: a TaskManager object.
+ """
+ node = task.node
+ csr_params = kwargs.get('csr_params')
+ csr_path = CONF.ilo.cert_path
+ path = os.path.join(csr_path, task.node.uuid)
+ if not os.path.exists(path):
+ os.makedirs(path, 0o755)
+
+ LOG.debug("Creating CSR for node %(node)s ..",
+ {'node': node.uuid})
+ _execute_ilo_step(node, 'create_csr', path, csr_params)
+ LOG.info("Creation of CSR for node %(node)s is "
+ "completed.", {'node': node.uuid})
+
+ @METRICS.timer('IloManagement.add_https_certificate')
+ @base.clean_step(priority=0, abortable=False,
+ argsinfo=_ADD_HTTPS_CERT_ARGSINFO)
+ def add_https_certificate(self, task, **kwargs):
+ """Adds the signed HTTPS certificate to the iLO.
+
+ :param task: a TaskManager object.
+ """
+ node = task.node
+ csr_path = CONF.ilo.cert_path
+ path = os.path.join(csr_path, task.node.uuid)
+ if not os.path.exists(path):
+ os.makedirs(path, 0o755)
+ cert_file_name = node.uuid + ".crt"
+ cert_file_path = os.path.join(path, cert_file_name)
+ cert_file = kwargs.get('cert_file')
+ url_scheme = urlparse.urlparse(cert_file).scheme
+ if url_scheme == '':
+ shutil.copy(cert_file, cert_file_path)
+ elif url_scheme in ('http', 'https', 'file'):
+ ilo_common.download(cert_file_path, cert_file)
+ else:
+ msg = (_("The url scheme %(scheme)s not supported with clean step "
+ "%(step)s") % {'scheme': url_scheme,
+ 'step': 'add_https_certificate'})
+ raise exception.IloOperationNotSupported(operation='clean step',
+ error=msg)
+
+ LOG.debug("Adding the signed HTTPS certificate to the "
+ "node %(node)s ..", {'node': node.uuid})
+ _execute_ilo_step(node, 'add_https_certificate', cert_file_path)
+ LOG.info("Adding of HTTPS certificate to the node %(node)s "
+ "is completed.", {'node': node.uuid})
+
@METRICS.timer('IloManagement.update_firmware')
@base.deploy_step(priority=0, argsinfo=_FIRMWARE_UPDATE_ARGSINFO)
@base.clean_step(priority=0, abortable=False,
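As a hedged example, the two new steps could be driven through manual cleaning with a payload along these lines; the argument keys come from the ``argsinfo`` blocks above and all values are placeholders:

.. code-block:: python

    clean_steps = [
        {'interface': 'management',
         'step': 'create_csr',
         'args': {'csr_params': {'City': 'Bangalore',
                                 'CommonName': 'ilo.example.com',
                                 'OrgName': 'Example Org',
                                 'State': 'Karnataka'}}},
        {'interface': 'management',
         'step': 'add_https_certificate',
         'args': {'cert_file': 'https://fileserver.example.com/ilo-signed.crt'}},
    ]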
diff --git a/ironic/drivers/modules/ilo/power.py b/ironic/drivers/modules/ilo/power.py
index ee8fcc794..a1363fb52 100644
--- a/ironic/drivers/modules/ilo/power.py
+++ b/ironic/drivers/modules/ilo/power.py
@@ -44,9 +44,8 @@ def _attach_boot_iso_if_needed(task):
This method checks the instance info of the baremetal node for a
boot iso. If the instance info has a value of key 'boot_iso',
- it indicates that 'boot_option' is 'netboot'. Therefore it attaches
- the boot ISO on the baremetal node and then sets the node to boot from
- virtual media cdrom.
+ it indicates a ramdisk deploy. Therefore it attaches the boot ISO on the
+ baremetal node and then sets the node to boot from virtual media cdrom.
:param task: a TaskManager instance containing the node to act on.
"""
diff --git a/ironic/drivers/modules/ilo/vendor.py b/ironic/drivers/modules/ilo/vendor.py
index 2f4986a2f..fa0400703 100644
--- a/ironic/drivers/modules/ilo/vendor.py
+++ b/ironic/drivers/modules/ilo/vendor.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,16 +26,14 @@ from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
+from ironic.drivers.modules.redfish import vendor as redfish_vendor
METRICS = metrics_utils.get_metrics_logger(__name__)
-class VendorPassthru(base.VendorInterface):
+class VendorPassthru(redfish_vendor.RedfishVendorPassthru):
"""Vendor-specific interfaces for iLO deploy drivers."""
- def get_properties(self):
- return {}
-
@METRICS.timer('IloVendorPassthru.validate')
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
@@ -50,10 +49,26 @@ class VendorPassthru(base.VendorInterface):
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
+ :raises: IloOperationNotSupported, if the driver does not support the
+ given operation with the ilo vendor interface.
"""
if method == 'boot_into_iso':
self._validate_boot_into_iso(task, kwargs)
return
+ redfish_event_methods = ['create_subscription',
+ 'delete_subscription',
+ 'get_all_subscriptions', 'get_subscription']
+ if method in redfish_event_methods:
+ self._validate_is_it_a_supported_system(task)
+ ilo_common.parse_driver_info(task.node)
+ ilo_common.update_redfish_properties(task)
+ if method == 'eject_vmedia':
+ error_message = _(method + (
+ " can not be performed as the driver does not support "
+ "eject_vmedia through ilo vendor interface"))
+ raise exception.IloOperationNotSupported(operation=method,
+ error=error_message)
+
super(VendorPassthru, self).validate(task, method, **kwargs)
def _validate_boot_into_iso(self, task, kwargs):
@@ -99,3 +114,23 @@ class VendorPassthru(base.VendorInterface):
ilo_common.setup_vmedia(task, kwargs['boot_iso_href'],
ramdisk_options=None)
manager_utils.node_power_action(task, states.REBOOT)
+
+ def _validate_is_it_a_supported_system(self, task):
+ """Verify and raise an exception if it is not a supported system.
+
+ :param task: A TaskManager object.
+ :param kwargs: The arguments sent with vendor passthru.
+ :raises: IloOperationNotSupported, if the node is not a Gen10 or
+ Gen10 Plus system.
+ """
+
+ node = task.node
+ ilo_object = ilo_common.get_ilo_object(node)
+ product_name = ilo_object.get_product_name()
+ operation = _("Event methods")
+ error_message = _(operation + (
+ " can not be performed as the driver does not support Event "
+ "methods on the given node"))
+ if 'Gen10' not in product_name:
+ raise exception.IloOperationNotSupported(operation=operation,
+ error=error_message)
diff --git a/ironic/drivers/modules/image_cache.py b/ironic/drivers/modules/image_cache.py
index 17dfba9cf..5ca053f36 100644
--- a/ironic/drivers/modules/image_cache.py
+++ b/ironic/drivers/modules/image_cache.py
@@ -237,6 +237,7 @@ class ImageCache(object):
"""
threshold = time.time() - self._cache_ttl
survived = []
+ count = 0
for file_name, last_used, stat in listing:
if last_used < threshold:
try:
@@ -246,6 +247,7 @@ class ImageCache(object):
"master image cache: %(exc)s",
{'name': file_name, 'exc': exc})
else:
+ count += 1
if amount is not None:
amount -= stat.st_size
if amount <= 0:
@@ -253,6 +255,9 @@ class ImageCache(object):
break
else:
survived.append((file_name, last_used, stat))
+ if count:
+ LOG.debug('Removed %(count)d expired file(s) from %(dir)s',
+ {'count': count, 'dir': self.master_dir})
return survived, amount
def _clean_up_ensure_cache_size(self, listing, amount):
@@ -275,6 +280,7 @@ class ImageCache(object):
for f in os.listdir(self.master_dir))
total_size = sum(os.path.getsize(f)
for f in total_listing)
+ count = 0
while listing and (total_size > self._cache_size
or (amount is not None and amount > 0)):
file_name, last_used, stat = listing.pop()
@@ -286,6 +292,7 @@ class ImageCache(object):
{'name': file_name, 'exc': exc})
else:
total_size -= stat.st_size
+ count += 1
if amount is not None:
amount -= stat.st_size
@@ -295,6 +302,10 @@ class ImageCache(object):
"threshold %(expected)d",
{'dir': self.master_dir, 'actual': total_size,
'expected': self._cache_size})
+ elif count:
+ LOG.debug(
+ 'Removed %(count)d file(s) from %(dir)s to free up space',
+ {'count': count, 'dir': self.master_dir})
return max(amount, 0) if amount is not None else 0
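
The new counters only add a debug log, but the TTL bookkeeping they wrap is easy to show in isolation. A minimal sketch with made-up file entries (stat objects omitted, since only the timestamps matter here):

    import time

    cache_ttl = 3600                      # stands in for self._cache_ttl
    threshold = time.time() - cache_ttl

    listing = [('a.img', time.time() - 7200, None),   # older than the TTL
               ('b.img', time.time() - 60, None)]     # still fresh

    survived, count = [], 0
    for name, last_used, stat in listing:
        if last_used < threshold:
            count += 1                    # would be unlinked and counted for the log
        else:
            survived.append((name, last_used, stat))

    print(count, [name for name, _, _ in survived])   # -> 1 ['b.img']
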
diff --git a/ironic/drivers/modules/image_utils.py b/ironic/drivers/modules/image_utils.py
index bb0dfa166..304c199bf 100644
--- a/ironic/drivers/modules/image_utils.py
+++ b/ironic/drivers/modules/image_utils.py
@@ -169,7 +169,7 @@ class ImageHandler(object):
return urlparse.urlunparse(parsed_url)
- def publish_image(self, image_file, object_name):
+ def publish_image(self, image_file, object_name, node_http_url=None):
"""Make image file downloadable.
Depending on ironic settings, pushes given file into Swift or copies
@@ -178,6 +178,9 @@ class ImageHandler(object):
:param image_file: path to file to publish
:param object_name: name of the published file
+ :param node_http_url: a url to be used to publish the image. If set,
+ the values from external_http_url and http_url
+ from CONF.deploy won't be used.
:return: a URL to download published file
"""
@@ -220,7 +223,8 @@ class ImageHandler(object):
shutil.copyfile(image_file, published_file)
os.chmod(published_file, self._file_permission)
- http_url = CONF.deploy.external_http_url or CONF.deploy.http_url
+ http_url = (node_http_url or CONF.deploy.external_http_url
+ or CONF.deploy.http_url)
image_url = os.path.join(http_url, self._image_subdir, object_name)
return image_url
@@ -302,8 +306,9 @@ def prepare_floppy_image(task, params=None):
images.create_vfat_image(vfat_image_tmpfile, parameters=params)
img_handler = ImageHandler(task.node.driver)
-
- image_url = img_handler.publish_image(vfat_image_tmpfile, object_name)
+ node_http_url = task.node.driver_info.get("external_http_url")
+ image_url = img_handler.publish_image(vfat_image_tmpfile, object_name,
+ node_http_url)
LOG.debug("Created floppy image %(name)s in Swift for node %(node)s, "
"exposed as temporary URL "
diff --git a/ironic/drivers/modules/ipxe_config.template b/ironic/drivers/modules/ipxe_config.template
index bca63c982..650083869 100644
--- a/ironic/drivers/modules/ipxe_config.template
+++ b/ironic/drivers/modules/ipxe_config.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel {% if pxe_options.ipxe_timeout > 0 %}--timeout {{ pxe_options.ipxe_timeout }} {% endif %}{{ pxe_options.aki_path }} root={{ ROOT }} ro text {{ pxe_options.pxe_append_params|default("", true) }} initrd=ramdisk || goto boot_partition
-initrd {% if pxe_options.ipxe_timeout > 0 %}--timeout {{ pxe_options.ipxe_timeout }} {% endif %}{{ pxe_options.ari_path }} || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel {% if pxe_options.ipxe_timeout > 0 %}--timeout {{ pxe_options.ipxe_timeout }} {% endif %}{{ pxe_options.aki_path }} text {{ pxe_options.pxe_append_params|default("", true) }} inst.ks={{ pxe_options.ks_cfg_url }} {% if pxe_options.repo_url %}inst.repo={{ pxe_options.repo_url }}{% else %}inst.stage2={{ pxe_options.stage2_url }}{% endif %} initrd=ramdisk || goto boot_anaconda
@@ -72,4 +66,4 @@ goto boot_iscsi
{%- endif %}
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py
index 84964bd2f..11153a6f1 100644
--- a/ironic/drivers/modules/irmc/boot.py
+++ b/ironic/drivers/modules/irmc/boot.py
@@ -376,9 +376,8 @@ def attach_boot_iso_if_needed(task):
This method checks the instance info of the bare metal node for a
boot ISO. If the instance info has a value of key 'boot_iso',
- it indicates that 'boot_option' is 'netboot'. Threfore it attaches
- the boot ISO on the bare metal node and then sets the node to boot from
- virtual media cdrom.
+ it indicates a ramdisk deploy. Therefore it attaches the boot ISO on the bare
+ metal node and then sets the node to boot from virtual media cdrom.
:param task: a TaskManager instance containing the node to act on.
:raises: IRMCOperationError if attaching virtual media failed.
diff --git a/ironic/drivers/modules/irmc/common.py b/ironic/drivers/modules/irmc/common.py
index 00b7c0625..7a8fc0f1d 100644
--- a/ironic/drivers/modules/irmc/common.py
+++ b/ironic/drivers/modules/irmc/common.py
@@ -15,8 +15,11 @@
"""
Common functionalities shared between different iRMC modules.
"""
+import os
+
from oslo_log import log as logging
from oslo_utils import importutils
+from oslo_utils import strutils
from ironic.common import exception
from ironic.common.i18n import _
@@ -46,6 +49,16 @@ OPTIONAL_PROPERTIES = {
"'ipmitool' or 'scci'. The default value is "
"'ipmitool'. Optional."),
}
+OPTIONAL_DRIVER_INFO_PROPERTIES = {
+ 'irmc_verify_ca': _('Either a Boolean value, a path to a CA_BUNDLE '
+ 'file or directory with certificates of trusted '
+ 'CAs. If set to True the driver will verify the '
+ 'host certificates; if False the driver will '
+ 'ignore verifying the SSL certificate. If it\'s '
+ 'a path the driver will use the specified '
+ 'certificate or one of the certificates in the '
+ 'directory. Defaults to True. Optional'),
+}
SNMP_PROPERTIES = {
'irmc_snmp_version': _("SNMP protocol version; either 'v1', 'v2c', or "
@@ -84,6 +97,7 @@ SNMP_V3_DEPRECATED_PROPERTIES = {
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
+COMMON_PROPERTIES.update(OPTIONAL_DRIVER_INFO_PROPERTIES)
COMMON_PROPERTIES.update(SNMP_PROPERTIES)
COMMON_PROPERTIES.update(SNMP_V3_REQUIRED_PROPERTIES)
COMMON_PROPERTIES.update(SNMP_V3_OPTIONAL_PROPERTIES)
@@ -116,7 +130,9 @@ def parse_driver_info(node):
# corresponding config names don't have 'irmc_' prefix
opt = {param: info.get(param, CONF.irmc.get(param[len('irmc_'):]))
for param in OPTIONAL_PROPERTIES}
- d_info = dict(req, **opt)
+ opt_driver_info = {param: info.get(param)
+ for param in OPTIONAL_DRIVER_INFO_PROPERTIES}
+ d_info = dict(req, **opt, **opt_driver_info)
d_info['irmc_port'] = utils.validate_network_port(
d_info['irmc_port'], 'irmc_port')
@@ -137,6 +153,38 @@ def parse_driver_info(node):
error_msgs.append(
_("Value '%s' is not supported for 'irmc_sensor_method'.") %
d_info['irmc_sensor_method'])
+
+ verify_ca = d_info.get('irmc_verify_ca')
+ if verify_ca is None:
+ d_info['irmc_verify_ca'] = verify_ca = CONF.webserver_verify_ca
+
+ # Check if verify_ca is a Boolean or a file/directory in the file-system
+ if isinstance(verify_ca, str):
+ if ((os.path.isdir(verify_ca) and os.path.isabs(verify_ca))
+ or (os.path.isfile(verify_ca) and os.path.isabs(verify_ca))):
+ # If it's fullpath and dir/file, we don't need to do anything
+ pass
+ else:
+ try:
+ d_info['irmc_verify_ca'] = strutils.bool_from_string(
+ verify_ca, strict=True)
+ except ValueError:
+ error_msgs.append(
+ _('Invalid value type set in driver_info/'
+ 'irmc_verify_ca on node %(node)s. '
+ 'The value should be a Boolean or the path '
+ 'to a file/directory, not "%(value)s"'
+ ) % {'value': verify_ca, 'node': node.uuid})
+ elif isinstance(verify_ca, bool):
+ # If it's a boolean it's grand, we don't need to do anything
+ pass
+ else:
+ error_msgs.append(
+ _('Invalid value type set in driver_info/irmc_verify_ca '
+ 'on node %(node)s. The value should be a Boolean or the path '
+ 'to a file/directory, not "%(value)s"') % {'value': verify_ca,
+ 'node': node.uuid})
+
if error_msgs:
msg = (_("The following errors were encountered while parsing "
"driver_info:\n%s") % "\n".join(error_msgs))
@@ -287,6 +335,7 @@ def get_irmc_client(node):
:raises: InvalidParameterValue on invalid inputs.
:raises: MissingParameterValue if some mandatory information
is missing on the node
+ :raises: IRMCOperationError if iRMC operation failed
"""
driver_info = parse_driver_info(node)
@@ -296,6 +345,7 @@ def get_irmc_client(node):
driver_info['irmc_password'],
port=driver_info['irmc_port'],
auth_method=driver_info['irmc_auth_method'],
+ verify=driver_info.get('irmc_verify_ca'),
client_timeout=driver_info['irmc_client_timeout'])
return scci_client
@@ -338,6 +388,7 @@ def get_irmc_report(node):
driver_info['irmc_password'],
port=driver_info['irmc_port'],
auth_method=driver_info['irmc_auth_method'],
+ verify=driver_info.get('irmc_verify_ca'),
client_timeout=driver_info['irmc_client_timeout'])
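
The irmc_verify_ca handling above reduces to a small normalization rule. A hedged sketch of that rule as a standalone helper (the function name is illustrative, not part of the driver):

    import os
    from oslo_utils import strutils

    def normalize_verify_ca(value):
        """Mirror the parse_driver_info() rules for irmc_verify_ca."""
        if isinstance(value, bool):
            return value                      # booleans pass through unchanged
        if (os.path.isabs(value)
                and (os.path.isdir(value) or os.path.isfile(value))):
            return value                      # absolute path to an existing CA bundle/dir
        # anything else must parse as a strict boolean, otherwise it is invalid
        return strutils.bool_from_string(value, strict=True)

    normalize_verify_ca(True)              # -> True
    normalize_verify_ca('no')              # -> False
    normalize_verify_ca('/etc/ssl/certs')  # -> '/etc/ssl/certs', if that path exists
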
diff --git a/ironic/drivers/modules/ks.cfg.template b/ironic/drivers/modules/ks.cfg.template
index 825ea38c8..93788fdb8 100644
--- a/ironic/drivers/modules/ks.cfg.template
+++ b/ironic/drivers/modules/ks.cfg.template
@@ -15,19 +15,32 @@ zerombr
clearpart --all --initlabel
autopart
-# Downloading and installing OS image using liveimg section is mandatory
-# in a *default* ironic configuration. Users (infrastructure operators)
+# Downloading and installing OS image using "liveimg" section is the
+# default mode of operation for an OpenStack-integrated Ironic
+# deployment where Glance is in use. Users (infrastructure operators)
# may choose to customize this pattern, or use release specific kickstart
# configurations which may already point to a mirror.
+#
+# An alternative is "url", which points to a repository of files used for
+# the deploy, similar to mounting an ISO media and exposing the files.
+
+{% if 'is_source_a_path' in ks_options -%}
+url --url {{ ks_options.liveimg_url }}
+
+# If packages are not selected, a URL based auto-deployment fails.
+%packages --ignoremissing
+%end
+{% else -%}
liveimg --url {{ ks_options.liveimg_url }}
+{% endif -%}
# Following %pre and %onerror sections are mandatory
%pre
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "start", "agent_status_message": "Deployment starting. Running pre-installation scripts."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "start", "agent_status_message": "Deployment starting. Running pre-installation scripts."}' {{ ks_options.heartbeat_url }}
%end
%onerror
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Deploying using anaconda. Check console for more information."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Deploying using anaconda. Check console for more information."}' {{ ks_options.heartbeat_url }}
%end
# Config-drive information, if any.
@@ -41,5 +54,5 @@ liveimg --url {{ ks_options.liveimg_url }}
# before rebooting.
%post
sync
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "end", "agent_status_message": "Deployment completed successfully."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "end", "agent_status_message": "Deployment completed successfully."}' {{ ks_options.heartbeat_url }}
%end
diff --git a/ironic/drivers/modules/network/neutron.py b/ironic/drivers/modules/network/neutron.py
index 3e4dcbfd6..2693b603e 100644
--- a/ironic/drivers/modules/network/neutron.py
+++ b/ironic/drivers/modules/network/neutron.py
@@ -20,9 +20,7 @@ from oslo_log import log
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import neutron
-from ironic.common import states
from ironic.drivers import base
-from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.network import common
LOG = log.getLogger(__name__)
@@ -61,15 +59,6 @@ class NeutronNetwork(common.NeutronVIFPortIDMixin,
"""
self.get_cleaning_network_uuid(task)
self.get_provisioning_network_uuid(task)
- node = task.node
- if (node.provision_state == states.DEPLOYING
- and node.driver_internal_info.get('is_whole_disk_image')
- and deploy_utils.get_boot_option(node) == 'netboot'):
- error_msg = (_('The node %s cannot perform "local" boot for '
- 'whole disk image when node is using "neutron" '
- 'network and is configured with "netboot" boot '
- 'option.') % node.uuid)
- raise exception.InvalidParameterValue(error_msg)
def _add_network(self, task, network, security_groups, process):
# If we have left over ports from a previous process, remove them
diff --git a/ironic/drivers/modules/pxe_base.py b/ironic/drivers/modules/pxe_base.py
index a8a768b8b..daa90ba8d 100644
--- a/ironic/drivers/modules/pxe_base.py
+++ b/ironic/drivers/modules/pxe_base.py
@@ -261,50 +261,6 @@ class PXEBaseMixin(object):
anaconda_boot=(boot_option == "kickstart"))
boot_device = boot_devices.PXE
- elif boot_option != "local":
- if task.driver.storage.should_write_image(task):
- # Make sure that the instance kernel/ramdisk is cached.
- # This is for the takeover scenario for active nodes.
- instance_image_info = pxe_utils.get_instance_image_info(
- task, ipxe_enabled=self.ipxe_enabled)
- pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
- ipxe_enabled=self.ipxe_enabled)
-
- # If it's going to PXE boot we need to update the DHCP server
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
- provider = dhcp_factory.DHCPFactory()
- provider.update_dhcp(task, dhcp_opts)
-
- iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
- try:
- root_uuid_or_disk_id = task.node.driver_internal_info[
- 'root_uuid_or_disk_id'
- ]
- except KeyError:
- if not task.driver.storage.should_write_image(task):
- pass
- elif not iwdi:
- LOG.warning("The UUID for the root partition can't be "
- "found, unable to switch the pxe config from "
- "deployment mode to service (boot) mode for "
- "node %(node)s", {"node": task.node.uuid})
- else:
- LOG.warning("The disk id for the whole disk image can't "
- "be found, unable to switch the pxe config "
- "from deployment mode to service (boot) mode "
- "for node %(node)s. Booting the instance "
- "from disk.", {"node": task.node.uuid})
- pxe_utils.clean_up_pxe_config(
- task, ipxe_enabled=self.ipxe_enabled)
- boot_device = boot_devices.DISK
- else:
- pxe_utils.build_service_pxe_config(
- task, instance_image_info, root_uuid_or_disk_id,
- ipxe_enabled=self.ipxe_enabled)
- boot_device = boot_devices.PXE
else:
# NOTE(dtantsur): create a PXE configuration as a safety net for
# hardware uncapable of persistent boot. If on a reboot it will try
diff --git a/ironic/drivers/modules/pxe_config.template b/ironic/drivers/modules/pxe_config.template
index 9b773b2ba..bf4cec11a 100644
--- a/ironic/drivers/modules/pxe_config.template
+++ b/ironic/drivers/modules/pxe_config.template
@@ -5,12 +5,6 @@ kernel {{ pxe_options.deployment_aki_path }}
append initrd={{ pxe_options.deployment_ari_path }} selinux=0 troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }}
ipappend 2
-
-label boot_partition
-kernel {{ pxe_options.aki_path }}
-append initrd={{ pxe_options.ari_path }} root={{ ROOT }} ro text {{ pxe_options.pxe_append_params|default("", true) }}
-
-
label boot_whole_disk
COM32 chain.c32
append mbr:{{ DISK_IDENTIFIER }}
diff --git a/ironic/drivers/modules/pxe_grub_config.template b/ironic/drivers/modules/pxe_grub_config.template
index d33cbb8cd..d8fc48673 100644
--- a/ironic/drivers/modules/pxe_grub_config.template
+++ b/ironic/drivers/modules/pxe_grub_config.template
@@ -7,11 +7,6 @@ menuentry "deploy" {
initrdefi {{ pxe_options.deployment_ari_path }}
}
-menuentry "boot_partition" {
- linuxefi {{ pxe_options.aki_path }} root={{ ROOT }} ro text {{ pxe_options.pxe_append_params|default("", true) }} boot_server={{pxe_options.tftp_server}}
- initrdefi {{ pxe_options.ari_path }}
-}
-
menuentry "boot_ramdisk" {
linuxefi {{ pxe_options.aki_path }} root=/dev/ram0 text {{ pxe_options.pxe_append_params|default("", true) }} {{ pxe_options.ramdisk_opts|default('', true) }}
initrdefi {{ pxe_options.ari_path }}
diff --git a/ironic/drivers/modules/redfish/bios.py b/ironic/drivers/modules/redfish/bios.py
index c2eb8fcbc..44742795e 100644
--- a/ironic/drivers/modules/redfish/bios.py
+++ b/ironic/drivers/modules/redfish/bios.py
@@ -19,7 +19,6 @@ from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
-from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
@@ -55,20 +54,23 @@ class RedfishBIOS(base.BIOSInterface):
driver='redfish',
reason=_("Unable to import the sushy library"))
- def _parse_allowable_values(self, allowable_values):
+ def _parse_allowable_values(self, node, allowable_values):
"""Convert the BIOS registry allowable_value list to expected strings
:param allowable_values: list of dicts of valid values for enumeration
:returns: list containing only allowable value names
"""
- # Get name from ValueName if it exists, otherwise use DisplayValueName
+ # Get name from ValueName if it exists, otherwise use ValueDisplayName
new_list = []
for dic in allowable_values:
- for key in dic:
- if key == 'ValueName' or key == 'DisplayValueName':
- new_list.append(dic[key])
- break
+ key = dic.get('ValueName') or dic.get('ValueDisplayName')
+ if key:
+ new_list.append(key)
+ else:
+ LOG.warning('Cannot detect the value name for enumeration '
+ 'item %(item)s for node %(node)s',
+ {'item': dic, 'node': node.uuid})
return new_list
@@ -130,7 +132,8 @@ class RedfishBIOS(base.BIOSInterface):
setting[k] = getattr(reg, k, None)
if k == "allowable_values" and isinstance(setting[k],
list):
- setting[k] = self._parse_allowable_values(setting[k])
+ setting[k] = self._parse_allowable_values(
+ task.node, setting[k])
LOG.debug('Cache BIOS settings for node %(node_uuid)s',
{'node_uuid': task.node.uuid})
@@ -185,9 +188,8 @@ class RedfishBIOS(base.BIOSInterface):
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
- self.post_reset(task)
self._set_reboot(task)
- return deploy_utils.get_async_step_return_state(task.node)
+ return self.post_reset(task)
else:
current_attrs = bios.attributes
LOG.debug('Post factory reset, BIOS configuration for node '
@@ -244,9 +246,8 @@ class RedfishBIOS(base.BIOSInterface):
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
- self.post_configuration(task, settings)
self._set_reboot_requested(task, attributes)
- return deploy_utils.get_async_step_return_state(task.node)
+ return self.post_configuration(task, settings)
else:
# Step 2: Verify requested BIOS settings applied
requested_attrs = info.get('requested_bios_attrs')
@@ -267,8 +268,7 @@ class RedfishBIOS(base.BIOSInterface):
:param task: a TaskManager instance containing the node to act on.
"""
- deploy_utils.prepare_agent_boot(task)
- self._reboot(task)
+ return deploy_utils.reboot_to_finish_step(task)
def post_configuration(self, task, settings):
"""Perform post configuration action to store the BIOS settings.
@@ -281,8 +281,7 @@ class RedfishBIOS(base.BIOSInterface):
:param task: a TaskManager instance containing the node to act on.
:param settings: a list of BIOS settings to be updated.
"""
- deploy_utils.prepare_agent_boot(task)
- self._reboot(task)
+ return deploy_utils.reboot_to_finish_step(task)
def get_properties(self):
"""Return the properties of the interface.
@@ -322,17 +321,6 @@ class RedfishBIOS(base.BIOSInterface):
LOG.debug('Verification of BIOS settings for node %(node_uuid)s '
'successful.', {'node_uuid': task.node.uuid})
- @task_manager.require_exclusive_lock
- def _reboot(self, task):
- """Reboot the target Redfish service.
-
- :param task: a TaskManager instance containing the node to act on.
- :raises: InvalidParameterValue when the wrong state is specified
- or the wrong driver info is specified.
- :raises: RedfishError on an error from the Sushy library
- """
- manager_utils.node_power_action(task, states.REBOOT)
-
def _set_reboot(self, task):
"""Set driver_internal_info flags for deployment or cleaning reboot.
diff --git a/ironic/drivers/modules/redfish/boot.py b/ironic/drivers/modules/redfish/boot.py
index 164425eee..a321c08ec 100644
--- a/ironic/drivers/modules/redfish/boot.py
+++ b/ironic/drivers/modules/redfish/boot.py
@@ -53,7 +53,11 @@ OPTIONAL_PROPERTIES = {
"used by ironic when building UEFI-bootable ISO "
"out of kernel and ramdisk. Required for UEFI "
"when deploy_iso is not provided."),
-
+ 'external_http_url': _("External URL that is used when the image could "
+ "be served outside of the provisioning network. "
+ "If set it will have priority over the following "
+ "configs: CONF.deploy.external_http_url and "
+ "CONF.deploy.http_url. Defaults to None.")
}
RESCUE_PROPERTIES = {
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index 77abdef0e..809ec59c6 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -693,6 +693,7 @@ def update_raid_config(node):
"""
system = redfish_utils.get_system(node)
logical_disks = []
+ vol_no_raid_type = []
for stor in system.storage.get_members():
for vol in stor.volumes.get_members():
if vol.raid_type:
@@ -705,7 +706,14 @@ def update_raid_config(node):
key for key, value in RAID_LEVELS.items()
if value['raid_type'] == vol.raid_type.value)
}
- logical_disks.append(logical_disk)
+ logical_disks.append(logical_disk)
+ else:
+ vol_no_raid_type.append(vol.identity)
+
+ if vol_no_raid_type:
+ LOG.warning("Unable to update raid_config for volumes missing RAID "
+ "type: %(vol_no_raid_type)s",
+ {'vol_no_raid_type': ", ".join(vol_no_raid_type)})
raid_common.update_raid_info(node, {'logical_disks': logical_disks})
diff --git a/ironic/drivers/modules/redfish/utils.py b/ironic/drivers/modules/redfish/utils.py
index 40cf33bce..e85e2ec6a 100644
--- a/ironic/drivers/modules/redfish/utils.py
+++ b/ironic/drivers/modules/redfish/utils.py
@@ -15,6 +15,7 @@
# under the License.
import collections
+import hashlib
import os
from urllib import parse as urlparse
@@ -198,43 +199,59 @@ class SessionCache(object):
_sessions = collections.OrderedDict()
def __init__(self, driver_info):
+ # Hash the password in the data structure, so we can
+ # include it in the session key.
+ # NOTE(TheJulia): Multiplying the address by 4, to ensure
+ # we meet a minimum of 16 bytes for salt.
+ pw_hash = hashlib.pbkdf2_hmac(
+ 'sha512',
+ driver_info.get('password').encode('utf-8'),
+ str(driver_info.get('address') * 4).encode('utf-8'), 40)
self._driver_info = driver_info
+ # Assemble the session key and append the hashed password to it,
+ # which forces new sessions to be established when the saved password
+ # is changed, just like the username, or address.
self._session_key = tuple(
self._driver_info.get(key)
for key in ('address', 'username', 'verify_ca')
- )
+ ) + (pw_hash.hex(),)
def __enter__(self):
try:
return self.__class__._sessions[self._session_key]
-
except KeyError:
- auth_type = self._driver_info['auth_type']
+ LOG.debug('A cached redfish session for Redfish endpoint '
+ '%(endpoint)s was not detected, initiating a session.',
+ {'endpoint': self._driver_info['address']})
- auth_class = self.AUTH_CLASSES[auth_type]
+ auth_type = self._driver_info['auth_type']
- authenticator = auth_class(
- username=self._driver_info['username'],
- password=self._driver_info['password']
- )
+ auth_class = self.AUTH_CLASSES[auth_type]
- sushy_params = {'verify': self._driver_info['verify_ca'],
- 'auth': authenticator}
- if 'root_prefix' in self._driver_info:
- sushy_params['root_prefix'] = self._driver_info['root_prefix']
- conn = sushy.Sushy(
- self._driver_info['address'],
- **sushy_params
- )
+ authenticator = auth_class(
+ username=self._driver_info['username'],
+ password=self._driver_info['password']
+ )
+
+ sushy_params = {'verify': self._driver_info['verify_ca'],
+ 'auth': authenticator}
+ if 'root_prefix' in self._driver_info:
+ sushy_params['root_prefix'] = self._driver_info['root_prefix']
+ conn = sushy.Sushy(
+ self._driver_info['address'],
+ **sushy_params
+ )
- if CONF.redfish.connection_cache_size:
- self.__class__._sessions[self._session_key] = conn
+ if CONF.redfish.connection_cache_size:
+ self.__class__._sessions[self._session_key] = conn
+ # Save a secure hash of the password into memory, so if we
+ # observe it change, we can detect the session is no longer valid.
- if (len(self.__class__._sessions)
- > CONF.redfish.connection_cache_size):
- self._expire_oldest_session()
+ if (len(self.__class__._sessions)
+ > CONF.redfish.connection_cache_size):
+ self._expire_oldest_session()
- return conn
+ return conn
def __exit__(self, exc_type, exc_val, exc_tb):
# NOTE(etingof): perhaps this session token is no good
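
The cache-key change can be reproduced in isolation. A minimal sketch with assumed driver_info values, showing that changing the password yields a different key and therefore a fresh session on the next lookup:

    import hashlib

    driver_info = {'address': 'https://bmc.example.com', 'username': 'admin',
                   'password': 'secret', 'verify_ca': True}

    # Address repeated four times to guarantee at least 16 bytes of salt.
    pw_hash = hashlib.pbkdf2_hmac(
        'sha512',
        driver_info['password'].encode('utf-8'),
        (driver_info['address'] * 4).encode('utf-8'),
        40)

    session_key = (driver_info['address'], driver_info['username'],
                   driver_info['verify_ca'], pw_hash.hex())
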
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index 4e700c6f8..d544d5687 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -799,6 +799,341 @@ class SNMPDriverBaytechMRP27(SNMPDriverSimple):
value_power_on = 1
+class SNMPDriverServerTechSentry3(SNMPDriverBase):
+ """SNMP driver class for Server Technology Sentry 3 PDUs.
+
+ ftp://ftp.servertech.com/Pub/SNMP/sentry3/Sentry3.mib
+
+ SNMP objects for Server Technology Power PDU.
+ 1.3.6.1.4.1.1718.3.2.3.1.5.1.1.<outlet ID> outletStatus
+ Read 0=off, 1=on, 2=off wait, 3=on wait, [...more options follow]
+ 1.3.6.1.4.1.1718.3.2.3.1.11.1.1.<outlet ID> outletControlAction
+ Write 0=no action, 1=on, 2=off, 3=reboot
+ """
+
+ oid_device = (1718, 3, 2, 3, 1)
+ oid_tower_infeed_idx = (1, 1, )
+ oid_power_status = (5,)
+ oid_power_action = (11,)
+
+ status_off = 0
+ status_on = 1
+ status_off_wait = 2
+ status_on_wait = 3
+
+ value_power_on = 1
+ value_power_off = 2
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverServerTechSentry3, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.status_on, self.status_off_wait):
+ power_state = states.POWER_ON
+ elif state in (self.status_off, self.status_on_wait):
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("SeverTech Sentry3 PDU %(addr)s oid %(oid) outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverServerTechSentry4(SNMPDriverBase):
+ """SNMP driver class for Server Technology Sentry 4 PDUs.
+
+ https://www.servertech.com/support/sentry-mib-oid-tree-downloads
+
+ SNMP objects for Server Technology Power PDU.
+ 1.3.6.1.4.1.1718.4.1.8.5.1.1.<outlet ID> outletStatus
+ notSet (0) fixedOn (1) idleOff (2) idleOn (3) [...more options follow]
+ pendOn (8) pendOff (9) off (10) on (11) [...more options follow]
+ eventOff (16) eventOn (17) eventReboot (18) eventShutdown (19)
+ 1.3.6.1.4.1.1718.4.1.8.5.1.2.<outlet ID> outletControlAction
+ Write 0=no action, 1=on, 2=off, 3=reboot
+ """
+
+ oid_device = (1718, 4, 1, 8, 5, 1)
+ oid_tower_infeed_idx = (1, 1, )
+ oid_power_status = (1,)
+ oid_power_action = (2,)
+
+ notSet = 0
+ fixedOn = 1
+ idleOff = 2
+ idleOn = 3
+ wakeOff = 4
+ wakeOn = 5
+ ocpOff = 6
+ ocpOn = 7
+ status_pendOn = 8
+ status_pendOff = 9
+ status_off = 10
+ status_on = 11
+ reboot = 12
+ shutdown = 13
+ lockedOff = 14
+ lockedOn = 15
+
+ value_power_on = 1
+ value_power_off = 2
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverServerTechSentry4, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.status_on, self.status_pendOn, self.idleOn):
+ power_state = states.POWER_ON
+ elif state in (self.status_off, self.status_pendOff):
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("ServerTech Sentry4 PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverRaritanPDU2(SNMPDriverBase):
+ """SNMP driver class for Raritan PDU2 PDUs.
+
+ http://support.raritan.com/px2/version-2.4.1/mibs/pdu2-mib-020400-39592.txt
+ http://cdn.raritan.com/download/PX/v1.5.20/PDU-MIB.txt
+
+ Command:
+ snmpset -v2c -c private -m+PDU2-MIB <pdu IP address> \
+ PDU2-MIB::switchingOperation.1.4 = cycle
+ snmpset -v2c -c private <pdu IP address> \
+ .1.3.6.1.4.1.13742.6.4.1.2.1.2.1.4 i 2
+ Output:
+ PDU2-MIB::switchingOperation.1.4 = INTEGER: cycle(2)
+ """
+
+ oid_device = (13742, 6, 4, 1, 2, 1)
+ oid_power_action = (2, )
+ oid_power_status = (3, )
+ oid_tower_infeed_idx = (1, )
+
+ unavailable = -1
+ status_open = 0
+ status_closed = 1
+ belowLowerCritical = 2
+ belowLowerWarning = 3
+ status_normal = 4
+ aboveUpperWarning = 5
+ aboveUpperCritical = 6
+ status_on = 7
+ status_off = 8
+ detected = 9
+ notDetected = 10
+ alarmed = 11
+ ok = 12
+ marginal = 13
+ fail = 14
+ yes = 15
+ no = 16
+ standby = 17
+ one = 18
+ two = 19
+ inSync = 20
+ outOfSync = 21
+
+ value_power_on = 1
+ value_power_off = 0
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverRaritanPDU2, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state == self.status_on:
+ power_state = states.POWER_ON
+ elif state == self.status_off:
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("Raritan PDU2 PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverVertivGeistPDU(SNMPDriverBase):
+ """SNMP driver class for VertivGeist NU30017L/NU30019L PDU.
+
+ https://mibs.observium.org/mib/GEIST-V5-MIB/
+
+ """
+
+ oid_device = (21239, 5, 2, 3, 5, 1)
+ oid_power_action = (6, )
+ oid_power_status = (4, )
+ oid_tower_infeed_idx = (1, )
+
+ on = 1
+ off = 2
+ on2off = 3
+ off2on = 4
+ rebootOn = 5
+ rebootOff = 6
+ unavailable = 7
+
+ value_power_on = 2
+ value_power_off = 4
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverVertivGeistPDU, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.on, self.on2off):
+ power_state = states.POWER_ON
+ elif state in (self.off, self.off2on):
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("Vertiv Geist PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
class SNMPDriverAuto(SNMPDriverBase):
SYS_OBJ_OID = (1, 3, 6, 1, 2, 1, 1, 2)
@@ -878,6 +1213,10 @@ DRIVER_CLASSES = {
'eatonpower': SNMPDriverEatonPower,
'teltronix': SNMPDriverTeltronix,
'baytech_mrp27': SNMPDriverBaytechMRP27,
+ 'servertech_sentry3': SNMPDriverServerTechSentry3,
+ 'servertech_sentry4': SNMPDriverServerTechSentry4,
+ 'raritan_pdu2': SNMPDriverRaritanPDU2,
+ 'vertivgeist_pdu': SNMPDriverVertivGeistPDU,
'auto': SNMPDriverAuto,
}
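
Each of the new PDU drivers builds the request OID from class-level tuples plus the configured outlet. A sketch of the composition for the servertech_sentry4 driver, where the outlet number is an assumed example of what the snmp_outlet driver_info field would supply:

    oid_enterprise = (1, 3, 6, 1, 4, 1)
    oid_device = (1718, 4, 1, 8, 5, 1)
    oid_power_status = (1,)
    oid_tower_infeed_idx = (1, 1)
    outlet = 5   # assumed driver_info['snmp_outlet'] value

    full_oid = (oid_enterprise + oid_device + oid_power_status
                + oid_tower_infeed_idx + (outlet,))
    print('.' + '.'.join(str(i) for i in full_oid))
    # -> .1.3.6.1.4.1.1718.4.1.8.5.1.1.1.1.5
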
diff --git a/ironic/tests/base.py b/ironic/tests/base.py
index ba43461b6..348f15c20 100644
--- a/ironic/tests/base.py
+++ b/ironic/tests/base.py
@@ -27,6 +27,7 @@ import subprocess
import sys
import tempfile
from unittest import mock
+import warnings
import eventlet
eventlet.monkey_patch(os=False)
@@ -38,6 +39,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslotest import base as oslo_test_base
+from sqlalchemy import exc as sqla_exc
from ironic.common import config as ironic_config
from ironic.common import context as ironic_context
@@ -70,6 +72,84 @@ def _patch_mock_callable(obj):
return False
+class WarningsFixture(fixtures.Fixture):
+ """Filters out warnings during test runs."""
+
+ def setUp(self):
+ super().setUp()
+
+ self._original_warning_filters = warnings.filters[:]
+
+ # NOTE(sdague): Make deprecation warnings only happen once. Otherwise
+ # this gets kind of crazy given the way that upstream python libs use
+ # this.
+ warnings.simplefilter('once', DeprecationWarning)
+
+ # NOTE(stephenfin): We get way too many of these. Silence them.
+ warnings.filterwarnings(
+ 'ignore',
+ message=(
+ 'Policy enforcement is depending on the value of .*. '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
+
+ # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
+ warnings.filterwarnings(
+ 'ignore',
+ message='Policy .* failed scope check',
+ category=UserWarning,
+ )
+
+ # Enable deprecation warnings to capture upcoming SQLAlchemy changes
+
+ warnings.filterwarnings(
+ 'ignore',
+ category=sqla_exc.SADeprecationWarning,
+ )
+
+ warnings.filterwarnings(
+ 'error',
+ module='ironic',
+ category=sqla_exc.SADeprecationWarning,
+ )
+
+ # Enable general SQLAlchemy warnings also to ensure we're not doing
+ # silly stuff. It's possible that we'll need to filter things out here
+ # with future SQLAlchemy versions, but that's a good thing
+
+ warnings.filterwarnings(
+ 'error',
+ module='ironic',
+ category=sqla_exc.SAWarning,
+ )
+
+ # ...but filter everything out until we get around to fixing them
+ # TODO(stephenfin): Fix all of these
+
+ warnings.filterwarnings(
+ 'ignore',
+ module='ironic',
+ message='SELECT statement has a cartesian product ',
+ category=sqla_exc.SAWarning,
+ )
+
+ # FIXME(stephenfin): We can remove this once oslo.db is fixed
+ # https://review.opendev.org/c/openstack/oslo.db/+/856453
+ warnings.filterwarnings(
+ 'ignore',
+ module='ironic',
+ message='TypeDecorator .* will not produce a cache key',
+ category=sqla_exc.SAWarning,
+ )
+
+ self.addCleanup(self._reset_warning_filters)
+
+ def _reset_warning_filters(self):
+ warnings.filters[:] = self._original_warning_filters
+
+
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
@@ -113,6 +193,7 @@ class TestCase(oslo_test_base.BaseTestCase):
self.addCleanup(hash_ring.HashRingManager().reset)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
+ self.useFixture(WarningsFixture())
driver_factory.HardwareTypesFactory._extension_manager = None
for factory in driver_factory._INTERFACE_LOADERS.values():
@@ -159,7 +240,7 @@ class TestCase(oslo_test_base.BaseTestCase):
values = ['fake']
if iface == 'deploy':
- values.extend(['direct', 'anaconda'])
+ values.extend(['direct', 'ramdisk', 'anaconda'])
elif iface == 'boot':
values.append('pxe')
elif iface == 'storage':
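
The ordering of those filterwarnings() calls matters: filters are prepended, so the most recently added filter is consulted first, which is how a narrow 'error' filter can coexist with an earlier blanket 'ignore'. A minimal illustration with generic categories (not Ironic code; 'mypkg' is a placeholder module name):

    import warnings

    warnings.simplefilter('once', DeprecationWarning)
    # Blanket ignore, added first...
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    # ...then a narrower 'error' filter, which wins for this module because it
    # sits in front of the blanket filter.
    warnings.filterwarnings('error', module='mypkg', category=DeprecationWarning)
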
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index 3c913834e..6531f36e7 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -600,6 +600,23 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertCountEqual(['driver_info', 'links'], data)
self.assertEqual('******', data['driver_info']['fake_password'])
+ def test_get_one_with_deleted_chassis(self):
+ node = obj_utils.create_test_node(self.context,
+ chassis_id=self.chassis.id)
+ with mock.patch.object(self.dbapi,
+ 'get_chassis_by_id',
+ autospec=True) as mock_gc:
+ # Explicitly return a chassis not found, and make sure the API
+ # hides this from the API consumer as this is likely just an
+ # in-flight deletion across multiple DB sessions or different
+ # API surfaces (or, just slow DB replication.)
+ mock_gc.side_effect = exception.ChassisNotFound(
+ chassis=self.chassis.id)
+ data = self.get_json(
+ '/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: str(api_v1.max_version())})
+ self.assertIsNone(data['chassis_uuid'])
+
def test_get_network_interface_fields_invalid_api_version(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
@@ -4881,13 +4898,39 @@ class TestPost(test_api_base.BaseApiTest):
ndict = test_api_utils.post_get_test_node(owner='cowsay')
response = self.post_json('/nodes', ndict,
headers={api_base.Version.string:
- str(api_v1.max_version())})
+ str(api_v1.max_version()),
+ 'X-Project-Id': 'cowsay'})
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/nodes/%s' % ndict['uuid'],
headers={api_base.Version.string:
str(api_v1.max_version())})
self.assertEqual('cowsay', result['owner'])
+ def test_create_node_owner_system_scope(self):
+ ndict = test_api_utils.post_get_test_node(owner='catsay')
+ response = self.post_json('/nodes', ndict,
+ headers={api_base.Version.string:
+ str(api_v1.max_version()),
+ 'OpenStack-System-Scope': 'all',
+ 'X-Roles': 'admin'})
+ self.assertEqual(http_client.CREATED, response.status_int)
+ result = self.get_json('/nodes/%s' % ndict['uuid'],
+ headers={api_base.Version.string:
+ str(api_v1.max_version())})
+ self.assertEqual('catsay', result['owner'])
+
+ def test_create_node_owner_recorded_project_scope(self):
+ ndict = test_api_utils.post_get_test_node()
+ response = self.post_json('/nodes', ndict,
+ headers={api_base.Version.string:
+ str(api_v1.max_version()),
+ 'X-Project-Id': 'ravensay'})
+ self.assertEqual(http_client.CREATED, response.status_int)
+ result = self.get_json('/nodes/%s' % ndict['uuid'],
+ headers={api_base.Version.string:
+ str(api_v1.max_version())})
+ self.assertEqual('ravensay', result['owner'])
+
def test_create_node_owner_old_api_version(self):
headers = {api_base.Version.string: '1.32'}
ndict = test_api_utils.post_get_test_node(owner='bob')
diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py
index 5793e95a8..cdc20d477 100644
--- a/ironic/tests/unit/api/test_acl.py
+++ b/ironic/tests/unit/api/test_acl.py
@@ -81,10 +81,18 @@ class TestACLBase(base.BaseApiTest):
body=None, assert_status=None,
assert_dict_contains=None,
assert_list_length=None,
- deprecated=None):
+ deprecated=None,
+ self_manage_nodes=True):
path = path.format(**self.format_data)
self.mock_auth.side_effect = self._fake_process_request
+ # Set self management override
+ if not self_manage_nodes:
+ cfg.CONF.set_override(
+ 'project_admin_can_manage_own_nodes',
+ False,
+ 'api')
+
# always request the latest api version
version = api_versions.max_version_string()
rheaders = {
diff --git a/ironic/tests/unit/api/test_audit.py b/ironic/tests/unit/api/test_audit.py
index d85ed3e85..cedeabf17 100644
--- a/ironic/tests/unit/api/test_audit.py
+++ b/ironic/tests/unit/api/test_audit.py
@@ -35,7 +35,7 @@ class TestAuditMiddleware(base.BaseApiTest):
@mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_enable_audit_request(self, mock_audit):
- CONF.audit.enabled = True
+ CONF.set_override('enabled', True, 'audit')
self._make_app()
mock_audit.assert_called_once_with(
mock.ANY,
@@ -44,14 +44,13 @@ class TestAuditMiddleware(base.BaseApiTest):
@mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_enable_audit_request_error(self, mock_audit):
- CONF.audit.enabled = True
+ CONF.set_override('enabled', True, 'audit')
mock_audit.side_effect = IOError("file access error")
-
self.assertRaises(exception.InputFileError,
self._make_app)
@mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_disable_audit_request(self, mock_audit):
- CONF.audit.enabled = False
+ CONF.set_override('enabled', False, 'audit')
self._make_app()
self.assertFalse(mock_audit.called)
diff --git a/ironic/tests/unit/api/test_ospmiddleware.py b/ironic/tests/unit/api/test_ospmiddleware.py
index 555251dd7..f814d5688 100644
--- a/ironic/tests/unit/api/test_ospmiddleware.py
+++ b/ironic/tests/unit/api/test_ospmiddleware.py
@@ -32,12 +32,12 @@ class TestOsprofilerWsgiMiddleware(base.BaseApiTest):
@mock.patch.object(web, 'WsgiMiddleware', autospec=True)
def test_enable_osp_wsgi_request(self, mock_ospmiddleware):
- CONF.profiler.enabled = True
+ CONF.set_override('enabled', True, 'profiler')
self._make_app()
mock_ospmiddleware.assert_called_once_with(mock.ANY)
@mock.patch.object(web, 'WsgiMiddleware', autospec=True)
def test_disable_osp_wsgi_request(self, mock_ospmiddleware):
- CONF.profiler.enabled = False
+ CONF.set_override('enabled', False, 'profiler')
self._make_app()
self.assertFalse(mock_ospmiddleware.called)
diff --git a/ironic/tests/unit/api/test_rbac_project_scoped.yaml b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
index 802600703..b55439ad1 100644
--- a/ironic/tests/unit/api/test_rbac_project_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
@@ -89,35 +89,71 @@ owner_admin_cannot_post_nodes:
body: &node_post_body
name: node
driver: fake-driverz
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+owner_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *owner_admin_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
owner_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *owner_manager_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
lessee_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *lessee_admin_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+lessee_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *lessee_admin_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: False
lessee_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *lessee_manager_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+lessee_manager_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *lessee_manager_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: True
third_party_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *third_party_admin_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+third_party_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *third_party_admin_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
# Based on nodes_post_member
owner_member_cannot_post_nodes:
@@ -125,7 +161,7 @@ owner_member_cannot_post_nodes:
method: post
headers: *owner_member_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
# Based on nodes_post_reader
owner_reader_cannot_post_reader:
@@ -133,7 +169,7 @@ owner_reader_cannot_post_reader:
method: post
headers: *owner_reader_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
# Based on nodes_get_admin
# TODO: Create 3 nodes, 2 owned, 1 leased where it is also owned.
@@ -671,6 +707,14 @@ owner_admin_cannot_delete_nodes:
method: delete
headers: *owner_admin_headers
assert_status: 403
+ self_manage_nodes: False
+
+owner_admin_can_delete_nodes:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *owner_admin_headers
+ assert_status: 503
+ self_manage_nodes: True
owner_manager_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
diff --git a/ironic/tests/unit/common/test_molds.py b/ironic/tests/unit/common/test_molds.py
index bd2c37e47..810dd61bc 100644
--- a/ironic/tests/unit/common/test_molds.py
+++ b/ironic/tests/unit/common/test_molds.py
@@ -38,7 +38,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_session = mock.Mock()
mock_session.get_token.return_value = 'token'
mock_swift.return_value = mock_session
- cfg.CONF.molds.storage = 'swift'
+ cfg.CONF.set_override('storage', 'swift', 'molds')
url = 'https://example.com/file1'
data = {'key': 'value'}
@@ -54,7 +54,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_session = mock.Mock()
mock_session.get_token.return_value = None
mock_swift.return_value = mock_session
- cfg.CONF.molds.storage = 'swift'
+ cfg.CONF.set_override('storage', 'swift', 'molds')
url = 'https://example.com/file1'
data = {'key': 'value'}
@@ -66,9 +66,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http(self, mock_put):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
url = 'https://example.com/file1'
data = {'key': 'value'}
@@ -81,9 +81,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_noauth(self, mock_put):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = None
- cfg.CONF.molds.password = None
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', None, 'molds')
+ cfg.CONF.set_override('password', None, 'molds')
url = 'https://example.com/file1'
data = {'key': 'value'}
@@ -95,9 +95,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_error(self, mock_put):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
response = mock.MagicMock()
response.status_code = 404
response.raise_for_status.side_effect = requests.exceptions.HTTPError
@@ -116,11 +116,11 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_connection_error(self, mock_put):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
- cfg.CONF.molds.retry_interval = 0
- cfg.CONF.molds.retry_attempts = 3
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
+ cfg.CONF.set_override('retry_interval', 0, 'molds')
+ cfg.CONF.set_override('retry_attempts', 3, 'molds')
response = mock.MagicMock()
mock_put.side_effect = [
requests.exceptions.ConnectTimeout,
@@ -137,11 +137,11 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_connection_error_exceeded(self, mock_put):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
- cfg.CONF.molds.retry_interval = 0
- cfg.CONF.molds.retry_attempts = 2
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
+ cfg.CONF.set_override('retry_interval', 0, 'molds')
+ cfg.CONF.set_override('retry_attempts', 2, 'molds')
mock_put.side_effect = [
requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError]
@@ -164,7 +164,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_session = mock.Mock()
mock_session.get_token.return_value = 'token'
mock_swift.return_value = mock_session
- cfg.CONF.molds.storage = 'swift'
+ cfg.CONF.set_override('storage', 'swift', 'molds')
response = mock.MagicMock()
response.status_code = 200
response.content = "{'key': 'value'}"
@@ -185,7 +185,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_session = mock.Mock()
mock_session.get_token.return_value = None
mock_swift.return_value = mock_session
- cfg.CONF.molds.storage = 'swift'
+ cfg.CONF.set_override('storage', 'swift', 'molds')
url = 'https://example.com/file1'
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -196,9 +196,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_http(self, mock_get):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
response = mock.MagicMock()
response.status_code = 200
response.content = "{'key': 'value'}"
@@ -215,9 +215,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_http_noauth(self, mock_get):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = None
- cfg.CONF.molds.password = None
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', None, 'molds')
+ cfg.CONF.set_override('password', None, 'molds')
response = mock.MagicMock()
response.status_code = 200
response.content = "{'key': 'value'}"
@@ -233,9 +233,9 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_http_error(self, mock_get):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
response = mock.MagicMock()
response.status_code = 404
response.raise_for_status.side_effect = requests.exceptions.HTTPError
@@ -253,11 +253,11 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_connection_error(self, mock_get):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
- cfg.CONF.molds.retry_interval = 0
- cfg.CONF.molds.retry_attempts = 3
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
+ cfg.CONF.set_override('retry_interval', 0, 'molds')
+ cfg.CONF.set_override('retry_attempts', 3, 'molds')
response = mock.MagicMock()
mock_get.side_effect = [
requests.exceptions.ConnectTimeout,
@@ -274,11 +274,11 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_mold_connection_error_exceeded(self, mock_get):
- cfg.CONF.molds.storage = 'http'
- cfg.CONF.molds.user = 'user'
- cfg.CONF.molds.password = 'password'
- cfg.CONF.molds.retry_interval = 0
- cfg.CONF.molds.retry_attempts = 2
+ cfg.CONF.set_override('storage', 'http', 'molds')
+ cfg.CONF.set_override('user', 'user', 'molds')
+ cfg.CONF.set_override('password', 'password', 'molds')
+ cfg.CONF.set_override('retry_interval', 0, 'molds')
+ cfg.CONF.set_override('retry_attempts', 2, 'molds')
mock_get.side_effect = [
requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError]
@@ -296,7 +296,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_empty(self, mock_get):
- cfg.CONF.molds.storage = 'http'
+ cfg.CONF.set_override('storage', 'http', 'molds')
response = mock.MagicMock()
response.status_code = 200
response.content = ''
@@ -309,7 +309,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_invalid_json(self, mock_get):
- cfg.CONF.molds.storage = 'http'
+ cfg.CONF.set_override('storage', 'http', 'molds')
response = mock.MagicMock()
response.status_code = 200
response.content = 'not json'
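A note on the cfg.CONF changes in this test class: assigning to CONF attributes directly mutates global configuration state that can outlive the test, whereas set_override() is tracked by oslo.config and can be cleanly reverted. A minimal sketch of the pattern, assuming a bare oslo.config setup rather than the Ironic test base classes:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([cfg.StrOpt('storage'), cfg.StrOpt('user')],
                       group='molds')

    # set_override() records the override on the option itself, so it can
    # be undone with clear_override() (or automatically by the oslo.config
    # test fixture); plain attribute assignment bypasses that bookkeeping
    # and can leak state into later tests.
    CONF.set_override('storage', 'swift', group='molds')
    assert CONF.molds.storage == 'swift'
    CONF.clear_override('storage', group='molds')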
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index 6b1339894..b775c68a1 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -28,6 +28,7 @@ from oslo_utils import uuidutils
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import image_service
+from ironic.common import image_service as base_image_service
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
@@ -1301,25 +1302,6 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
self.assertEqual('instance_ramdisk_uuid',
task.node.instance_info['ramdisk'])
- def test_get_instance_image_info(self):
- # Tests when 'is_whole_disk_image' exists in driver_internal_info
- # NOTE(TheJulia): The method being tested is primarily geared for
- # only netboot operation as the information should only need to be
- # looked up again during network booting.
- self.config(group="deploy", default_boot_option="netboot")
- self._test_get_instance_image_info()
-
- def test_get_instance_image_info_without_is_whole_disk_image(self):
- # NOTE(TheJulia): The method being tested is primarily geared for
- # only netboot operation as the information should only need to be
- # looked up again during network booting.
- self.config(group="deploy", default_boot_option="netboot")
- # Tests when 'is_whole_disk_image' doesn't exists in
- # driver_internal_info
- del self.node.driver_internal_info['is_whole_disk_image']
- self.node.save()
- self._test_get_instance_image_info()
-
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='local', autospec=True)
def test_get_instance_image_info_localboot(self, boot_opt_mock):
@@ -1381,7 +1363,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
'LiveOS',
'squashfs.img')),
'ks_template':
- (CONF.anaconda.default_ks_template,
+ ('file://' + CONF.anaconda.default_ks_template,
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'ks.cfg.template')),
@@ -1399,7 +1381,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
self.assertEqual(expected_info, image_info)
# In the absence of a kickstart template in both instance_info and
# the image, the default kickstart template is used
- self.assertEqual(CONF.anaconda.default_ks_template,
+ self.assertEqual('file://' + CONF.anaconda.default_ks_template,
image_info['ks_template'][0])
calls = [mock.call(task.node), mock.call(task.node)]
boot_opt_mock.assert_has_calls(calls)
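The assertions above (and the matching ones later in this file) now expect [anaconda]default_ks_template to be referenced as a file:// URL instead of a bare path, so the kickstart template travels through the same image-fetching path as http:// or glance:// sources. A rough, hypothetical illustration of that normalisation, not the actual pxe_utils code:

    from urllib.parse import urlparse

    def ks_template_ref(value):
        # References that already carry a scheme (glance://, http://,
        # file://) pass through untouched; a bare filesystem path, such as
        # the default for [anaconda]default_ks_template, gets a file://
        # prefix so a file-backed image service can serve it.
        if urlparse(value).scheme:
            return value
        return 'file://' + value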
@@ -1415,25 +1397,23 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='kickstart', autospec=True)
- @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
- def test_get_instance_image_info_with_kickstart_url(
+ @mock.patch.object(base_image_service.HttpImageService, 'show',
+ autospec=True)
+ def test_get_instance_image_info_with_kickstart_url_http(
self, image_show_mock, boot_opt_mock):
- properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
- u'ramdisk_id': u'instance_ramdisk_uuid',
- u'image_source': u'http://path/to/os/'}}
-
+ properties = {'properties': {}}
expected_info = {'ramdisk':
- ('instance_ramdisk_uuid',
+ ('http://fake.url/ramdisk',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'ramdisk')),
'kernel':
- ('instance_kernel_uuid',
+ ('http://fake.url/kernel',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'kernel')),
'ks_template':
- (CONF.anaconda.default_ks_template,
+ ('file://' + CONF.anaconda.default_ks_template,
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'ks.cfg.template')),
@@ -1449,13 +1429,18 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
dii = task.node.driver_internal_info
dii['is_source_a_path'] = True
task.node.driver_internal_info = dii
+ i_info = task.node.instance_info
+ i_info['image_source'] = 'http://fake.url/path'
+ i_info['kernel'] = 'http://fake.url/kernel'
+ i_info['ramdisk'] = 'http://fake.url/ramdisk'
+ task.node.instance_info = i_info
task.node.save()
image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=False)
self.assertEqual(expected_info, image_info)
# In the absence of a kickstart template in both instance_info and
# the image, the default kickstart template is used
- self.assertEqual(CONF.anaconda.default_ks_template,
+ self.assertEqual('file://' + CONF.anaconda.default_ks_template,
image_info['ks_template'][0])
calls = [mock.call(task.node), mock.call(task.node)]
boot_opt_mock.assert_has_calls(calls)
@@ -1542,6 +1527,46 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
list(fake_pxe_info.values()),
True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
+ @mock.patch.object(pxe_utils, 'ensure_tree', autospec=True)
+ @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
+ def test_cache_ramdisk_kernel_ipxe_anaconda(self, mock_fetch_image,
+ mock_ensure_tree, mock_chmod):
+ expected_path = os.path.join(CONF.deploy.http_root,
+ self.node.uuid)
+ fake_pxe_info = {'ramdisk':
+ ('instance_ramdisk_uuid',
+ os.path.join(CONF.pxe.tftp_root,
+ self.node.uuid,
+ 'ramdisk')),
+ 'kernel':
+ ('instance_kernel_uuid',
+ os.path.join(CONF.pxe.tftp_root,
+ self.node.uuid,
+ 'kernel')),
+ 'ks_template':
+ ('file://' + CONF.anaconda.default_ks_template,
+ os.path.join(CONF.deploy.http_root,
+ self.node.uuid,
+ 'ks.cfg.template')),
+ 'ks_cfg':
+ ('',
+ os.path.join(CONF.deploy.http_root,
+ self.node.uuid,
+ 'ks.cfg'))}
+ expected = fake_pxe_info.copy()
+ expected.pop('ks_cfg')
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info,
+ ipxe_enabled=True)
+ mock_ensure_tree.assert_called_with(expected_path)
+ mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
+ list(expected.values()),
+ True)
+
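The new iPXE/anaconda caching test asserts that everything except 'ks_cfg' is handed to fetch_images(): the rendered kickstart file is generated locally from 'ks_template' later in the flow, so there is nothing to download for it. In spirit (hypothetical helper, not the real cache_ramdisk_kernel):

    def images_to_fetch(pxe_info):
        # 'ks_cfg' carries an empty href - it is rendered from the
        # kickstart template on the conductor, not fetched - so drop it
        # before calling deploy_utils.fetch_images().
        return [(href, path)
                for label, (href, path) in pxe_info.items()
                if label != 'ks_cfg']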
@mock.patch.object(pxe.PXEBoot, '__init__', lambda self: None)
class PXEBuildKickstartConfigOptionsTestCase(db_base.DbTestCase):
@@ -1573,6 +1598,47 @@ class PXEBuildKickstartConfigOptionsTestCase(db_base.DbTestCase):
self.assertTrue(params['ks_options'].pop('agent_token'))
self.assertEqual(expected, params['ks_options'])
+ @mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
+ def test_build_kickstart_config_options_pxe_source_path(self,
+ api_url_mock):
+ api_url_mock.return_value = 'http://ironic-api'
+ d_info = self.node.driver_internal_info
+ d_info['is_source_a_path'] = True
+ self.node.driver_internal_info = d_info
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ expected = {}
+ expected['liveimg_url'] = task.node.instance_info['image_url']
+ expected['config_drive'] = ''
+ expected['heartbeat_url'] = (
+ 'http://ironic-api/v1/heartbeat/%s' % task.node.uuid
+ )
+ expected['is_source_a_path'] = 'true'
+ params = pxe_utils.build_kickstart_config_options(task)
+ self.assertTrue(params['ks_options'].pop('agent_token'))
+ self.assertEqual(expected, params['ks_options'])
+ self.assertNotIn('insecure_heartbeat', params)
+
+ @mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
+ def test_build_kickstart_config_options_pxe_insecure_heartbeat(
+ self, api_url_mock):
+ api_url_mock.return_value = 'http://ironic-api'
+ self.assertFalse(CONF.anaconda.insecure_heartbeat)
+ CONF.set_override('insecure_heartbeat', True, 'anaconda')
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ expected = {}
+ expected['liveimg_url'] = task.node.instance_info['image_url']
+ expected['config_drive'] = ''
+ expected['heartbeat_url'] = (
+ 'http://ironic-api/v1/heartbeat/%s' % task.node.uuid
+ )
+ expected['insecure_heartbeat'] = 'true'
+ params = pxe_utils.build_kickstart_config_options(task)
+ self.assertTrue(params['ks_options'].pop('agent_token'))
+ self.assertEqual(expected, params['ks_options'])
+
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_prepare_instance_kickstart_config_not_anaconda_boot(self,
render_mock):
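The two kickstart-option tests added above pin down how build_kickstart_config_options exposes the "source is a path" and insecure-heartbeat cases: both end up in ks_options as the literal string 'true', since they are substituted into the ks.cfg template rather than consumed as Python booleans. A condensed, illustrative fragment of what the tests expect (not the actual implementation):

    def extra_ks_options(task):
        opts = {}
        if task.node.driver_internal_info.get('is_source_a_path'):
            # Rendered into ks.cfg.template, hence a string, not a bool.
            opts['is_source_a_path'] = 'true'
        if CONF.anaconda.insecure_heartbeat:
            opts['insecure_heartbeat'] = 'true'
        return opts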
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
index 65261450a..a4c3d57b6 100644
--- a/ironic/tests/unit/conductor/test_cleaning.py
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -51,8 +51,6 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
def __do_node_clean_validate_fail(self, mock_validate, clean_steps=None):
- # InvalidParameterValue should cause node to go to CLEANFAIL
- mock_validate.side_effect = exception.InvalidParameterValue('error')
tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
@@ -68,26 +66,42 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.assertIsNone(node.fault)
mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
+ def __do_node_clean_validate_fail_invalid(self, mock_validate,
+ clean_steps=None):
+ # InvalidParameterValue should cause node to go to CLEANFAIL
+ mock_validate.side_effect = exception.InvalidParameterValue('error')
+ self.__do_node_clean_validate_fail(mock_validate,
+ clean_steps=clean_steps)
+
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_clean_automated_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
+ self.__do_node_clean_validate_fail_invalid(mock_validate)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_clean_manual_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+ self.__do_node_clean_validate_fail_invalid(mock_validate,
+ clean_steps=[])
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test__do_node_clean_automated_network_validate_fail(self,
mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
+ self.__do_node_clean_validate_fail_invalid(mock_validate)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test__do_node_clean_manual_network_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+ self.__do_node_clean_validate_fail_invalid(mock_validate,
+ clean_steps=[])
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_network_error_fail(self, mock_validate):
+ # NetworkError should cause node to go to CLEANFAIL
+ mock_validate.side_effect = exception.NetworkError()
+ self.__do_node_clean_validate_fail(mock_validate)
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
@mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index b433aa4a5..5d84dbbef 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -1829,6 +1829,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
def test_do_node_deploy_maintenance(self, mock_iwdi):
mock_iwdi.return_value = False
+ self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
maintenance=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
@@ -1843,6 +1844,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
self.assertFalse(mock_iwdi.called)
def _test_do_node_deploy_validate_fail(self, mock_validate, mock_iwdi):
+ self._start_service()
mock_iwdi.return_value = False
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
@@ -2389,6 +2391,7 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_tear_down_validate_fail(self, mock_validate):
+ self._start_service()
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
@@ -7319,7 +7322,6 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
mock_boot_validate.assert_not_called()
- self.assertNotIn('is_whole_disk_image', task.node.driver_internal_info)
@mock.patch('ironic.common.image_service.HttpImageService.validate_href',
autospec=True)
@@ -7328,26 +7330,23 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ @mock.patch('ironic.drivers.modules.ramdisk.RamdiskDeploy.take_over',
autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ @mock.patch('ironic.drivers.modules.ramdisk.RamdiskDeploy.prepare',
autospec=True)
- def test__do_adoption_with_netboot(self,
- mock_prepare,
- mock_take_over,
- mock_start_console,
- mock_boot_validate,
- mock_power_validate,
- mock_validate_href):
+ def test__do_adoption_ramdisk_deploy(self,
+ mock_prepare,
+ mock_take_over,
+ mock_start_console,
+ mock_boot_validate,
+ mock_power_validate,
+ mock_validate_href):
"""Test a successful node adoption"""
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
- provision_state=states.ADOPTING,
- instance_info={
- 'capabilities': {'boot_option': 'netboot'},
- 'image_source': 'http://127.0.0.1/image',
- })
+ deploy_interface='ramdisk',
+ provision_state=states.ADOPTING)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
@@ -7360,10 +7359,6 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
mock_boot_validate.assert_called_once_with(task.driver.boot, task)
- self.assertTrue(task.node.driver_internal_info.get(
- 'is_whole_disk_image'))
- mock_validate_href.assert_called_once_with(mock.ANY,
- 'http://127.0.0.1/image')
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
@@ -7410,9 +7405,9 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ @mock.patch('ironic.drivers.modules.ramdisk.RamdiskDeploy.take_over',
autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ @mock.patch('ironic.drivers.modules.ramdisk.RamdiskDeploy.prepare',
autospec=True)
def test__do_adoption_boot_validate_failure(self,
mock_prepare,
@@ -7428,10 +7423,8 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
- provision_state=states.ADOPTING,
- instance_info={
- 'capabilities': {'boot_option': 'netboot'},
- })
+ deploy_interface='ramdisk',
+ provision_state=states.ADOPTING)
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
@@ -8384,7 +8377,6 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
# 9 retained due to days, 3 to config
self.service._manage_node_history(self.context)
events = objects.NodeHistory.list(self.context)
- print(events)
self.assertEqual(12, len(events))
events = objects.NodeHistory.list_by_node_id(self.context, 10)
self.assertEqual(4, len(events))
@@ -8404,3 +8396,73 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
self.assertEqual('one', events[1].event)
self.assertEqual('two', events[2].event)
self.assertEqual('three', events[3].event)
+
+
+class ConcurrentActionLimitTestCase(mgr_utils.ServiceSetUpMixin,
+ db_base.DbTestCase):
+
+ def setUp(self):
+ super(ConcurrentActionLimitTestCase, self).setUp()
+ self._start_service()
+ self.node1 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=110,
+ uuid=uuidutils.generate_uuid())
+ self.node2 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=111,
+ uuid=uuidutils.generate_uuid())
+ self.node3 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=112,
+ uuid=uuidutils.generate_uuid())
+ self.node4 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=113,
+ uuid=uuidutils.generate_uuid())
+ # Create the nodes, as the tasks need to operate across tables.
+ self.node1.create()
+ self.node2.create()
+ self.node3.create()
+ self.node4.create()
+
+ def test_concurrent_action_limit_deploy(self):
+ self.node1.provision_state = states.DEPLOYING
+ self.node2.provision_state = states.DEPLOYWAIT
+ self.node1.save()
+ self.node2.save()
+ CONF.set_override('max_concurrent_deploy', 2, group='conductor')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'provisioning')
+ self.service._concurrent_action_limit('unprovisioning')
+ self.service._concurrent_action_limit('cleaning')
+ CONF.set_override('max_concurrent_deploy', 3, group='conductor')
+ self.service._concurrent_action_limit('provisioning')
+
+ def test_concurrent_action_limit_cleaning(self):
+ self.node1.provision_state = states.DELETING
+ self.node2.provision_state = states.CLEANING
+ self.node3.provision_state = states.CLEANWAIT
+ self.node1.save()
+ self.node2.save()
+ self.node3.save()
+
+ CONF.set_override('max_concurrent_clean', 3, group='conductor')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'cleaning')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'unprovisioning')
+ self.service._concurrent_action_limit('provisioning')
+ CONF.set_override('max_concurrent_clean', 4, group='conductor')
+ self.service._concurrent_action_limit('cleaning')
+ self.service._concurrent_action_limit('unprovisioning')
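ConcurrentActionLimitTestCase exercises the new conductor-side throttle: 'provisioning' raises ConcurrentActionLimit once the number of nodes in deploy-related states reaches [conductor]max_concurrent_deploy, while 'cleaning' and 'unprovisioning' are checked against max_concurrent_clean over the delete/clean states. A simplified sketch of the behaviour the tests describe, with an assumed shape rather than the real conductor method:

    DEPLOY_STATES = (states.DEPLOYING, states.DEPLOYWAIT)
    CLEAN_STATES = (states.DELETING, states.CLEANING, states.CLEANWAIT)

    def concurrent_action_limit(dbapi, action):
        if action == 'provisioning':
            limit = CONF.conductor.max_concurrent_deploy
            in_flight = dbapi.count_nodes_in_provision_state(
                list(DEPLOY_STATES))
        else:
            # 'cleaning' and 'unprovisioning' share the clean limit.
            limit = CONF.conductor.max_concurrent_clean
            in_flight = dbapi.count_nodes_in_provision_state(
                list(CLEAN_STATES))
        if in_flight >= limit:
            # The real exception likely carries more context in kwargs.
            raise exception.ConcurrentActionLimit()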
diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py
index eb5200f4e..b4d70b2dd 100644
--- a/ironic/tests/unit/db/test_nodes.py
+++ b/ironic/tests/unit/db/test_nodes.py
@@ -1081,3 +1081,39 @@ class DbNodeTestCase(base.DbTestCase):
self.dbapi.check_node_list,
[node1.uuid, 'this/cannot/be/a/name'])
self.assertIn('this/cannot/be/a/name', str(exc))
+
+ def test_node_provision_state_count(self):
+ active_nodes = 5
+ manageable_nodes = 3
+ deploywait_nodes = 1
+ for i in range(0, active_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.ACTIVE)
+ for i in range(0, manageable_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.MANAGEABLE)
+ for i in range(0, deploywait_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.DEPLOYWAIT)
+
+ self.assertEqual(
+ active_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.ACTIVE)
+ )
+ self.assertEqual(
+ manageable_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.MANAGEABLE)
+ )
+ self.assertEqual(
+ deploywait_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.DEPLOYWAIT)
+ )
+ total = active_nodes + manageable_nodes + deploywait_nodes
+ self.assertEqual(
+ total,
+ self.dbapi.count_nodes_in_provision_state([
+ states.ACTIVE,
+ states.MANAGEABLE,
+ states.DEPLOYWAIT
+ ])
+ )
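test_node_provision_state_count fixes the contract of the new DB API call: count_nodes_in_provision_state() accepts a single state or a list of states and returns the number of matching nodes. A hedged SQLAlchemy-style sketch of such a query (assumed implementation; the real one lives in ironic/db/sqlalchemy/api.py):

    import sqlalchemy as sa

    from ironic.db.sqlalchemy import models

    def count_nodes_in_provision_state(session, provision_state):
        # Accept either a single state string or a list of states.
        if not isinstance(provision_state, list):
            provision_state = [provision_state]
        # SELECT COUNT(*) FROM nodes WHERE provision_state IN (...)
        query = sa.select(sa.func.count()).select_from(models.Node).where(
            models.Node.provision_state.in_(provision_state))
        return session.execute(query).scalar()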
diff --git a/ironic/tests/unit/drivers/boot-fallback.ipxe b/ironic/tests/unit/drivers/boot-fallback.ipxe
index bf8ab414c..ada2646a6 100644
--- a/ironic/tests/unit/drivers/boot-fallback.ipxe
+++ b/ironic/tests/unit/drivers/boot-fallback.ipxe
@@ -10,6 +10,12 @@ isset ${net${netid}/mac} || goto loop_done
echo Attempting to boot from MAC ${net${netid}/mac:hexhyp}
chain pxelinux.cfg/${net${netid}/mac:hexhyp} || goto loop
+# If we've got here the chained config returned success
+# suggesting "sanboot" in boot_whole_disk failed (some UEFI cases)
+# exit 0 so the bios continues to the next device
+echo Exiting pxe config to allow boot to continue on next device
+exit 0
+
:loop_done
chain inspector.ipxe | goto boot_failed
diff --git a/ironic/tests/unit/drivers/boot.ipxe b/ironic/tests/unit/drivers/boot.ipxe
index aa8ee9e51..006bcb4aa 100644
--- a/ironic/tests/unit/drivers/boot.ipxe
+++ b/ironic/tests/unit/drivers/boot.ipxe
@@ -10,6 +10,12 @@ isset ${net${netid}/mac} || goto loop_done
echo Attempting to boot from MAC ${net${netid}/mac:hexhyp}
chain pxelinux.cfg/${net${netid}/mac:hexhyp} || goto loop
+# If we've got here the chained config returned success
+# suggesting "sanboot" in boot_whole_disk failed (some UEFI cases)
+# exit 0 so the bios continues to the next device
+echo Exiting pxe config to allow boot to continue on next device
+exit 0
+
:loop_done
echo PXE boot failed! No configuration found for any of the present NICs.
echo Press any key to reboot...
diff --git a/ironic/tests/unit/drivers/ipxe_config.template b/ironic/tests/unit/drivers/ipxe_config.template
index 70f8a03f1..3005a73d1 100644
--- a/ironic/tests/unit/drivers/ipxe_config.template
+++ b/ironic/tests/unit/drivers/ipxe_config.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -44,4 +38,4 @@ initrd http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
boot
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_anaconda.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_anaconda.template
index 7963b3883..0c2812e85 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_anaconda.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_anaconda.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.repo=http://1.2.3.4/path/to/os/ initrd=ramdisk || goto boot_anaconda
@@ -44,4 +38,4 @@ initrd http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
boot
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template
index c7133c7b6..9c889854d 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -42,4 +36,4 @@ imgfree
sanboot http://1.2.3.4:1234/uuid/iso
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template
index 70f8a03f1..3005a73d1 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_ramdisk.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -44,4 +38,4 @@ initrd http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
boot
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_extra_volume.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_extra_volume.template
index 0a872804a..ee619b53e 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_extra_volume.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_extra_volume.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -62,4 +56,4 @@ sleep 10
goto boot_iscsi
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template
index 571216e39..ede0283c6 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -59,4 +53,4 @@ sleep 10
goto boot_iscsi
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_no_extra_volumes.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_no_extra_volumes.template
index 6b7a4394d..61fbec756 100644
--- a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_no_extra_volumes.template
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_no_extra_volumes.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -59,4 +53,4 @@ sleep 10
goto boot_iscsi
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/ipxe_config_timeout.template b/ironic/tests/unit/drivers/ipxe_config_timeout.template
index 2458f010b..af00f5490 100644
--- a/ironic/tests/unit/drivers/ipxe_config_timeout.template
+++ b/ironic/tests/unit/drivers/ipxe_config_timeout.template
@@ -25,12 +25,6 @@ echo Powering off in 30 seconds.
sleep 30
poweroff
-:boot_partition
-imgfree
-kernel --timeout 120 http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
-initrd --timeout 120 http://1.2.3.4:1234/ramdisk || goto boot_partition
-boot
-
:boot_anaconda
imgfree
kernel --timeout 120 http://1.2.3.4:1234/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2 initrd=ramdisk || goto boot_anaconda
@@ -44,4 +38,4 @@ initrd --timeout 120 http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
boot
:boot_whole_disk
-sanboot --no-describe
+sanboot --no-describe || exit 0
diff --git a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
index ed9199575..3f295c4d9 100644
--- a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
+++ b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
@@ -623,24 +623,6 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
{'instance_info.image_source': INSTANCE_INFO['image_source']},
mock.ANY)
- @mock.patch.object(deploy_utils, 'get_boot_option',
- return_value='netboot', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
- def test_validate_not_iwdi_netboot(self, pxe_boot_validate_mock,
- get_boot_mock):
- driver_internal_info = dict(DRIVER_INTERNAL_INFO)
- driver_internal_info['is_whole_disk_image'] = False
- self.node.driver_internal_info = driver_internal_info
- self.node.save()
-
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- self.assertRaises(exception.InvalidParameterValue,
- self.driver.validate, task)
- pxe_boot_validate_mock.assert_called_once_with(
- task.driver.boot, task)
- get_boot_mock.assert_called_once_with(task.node)
-
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
@mock.patch.object(utils, 'node_power_action', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py
index 780d2893c..acbd009d3 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py
@@ -2457,13 +2457,145 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
self.assertEqual(False, result)
mock_log.assert_called_once()
+ @mock.patch.object(deploy_utils, 'reboot_to_finish_step',
+ autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode(
+ self, mock_get_system, mock_reboot):
+ mock_task_mon = mock.Mock(task_monitor_uri='/TaskService/1')
+ mock_oem_controller = mock.Mock()
+ mock_oem_controller.convert_to_raid.return_value = mock_task_mon
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.return_value = mock_oem_controller
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertEqual(
+ ['/TaskService/1'],
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertEqual(mock_reboot.return_value, result)
+
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_no_conversion(
+ self, mock_get_system):
+ mock_oem_controller = mock.Mock()
+ mock_oem_controller.convert_to_raid.return_value = None
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.return_value = mock_oem_controller
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_not_raid(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(storage_controllers=None)
+ mock_controllers = mock.PropertyMock(
+ side_effect=sushy.exceptions.MissingAttributeError)
+ type(mock_storage).controllers = mock_controllers
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_not_called()
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_idrac(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(storage_controllers=mock.Mock())
+ mock_controllers = mock.PropertyMock(
+ side_effect=sushy.exceptions.MissingAttributeError)
+ type(mock_storage).controllers = mock_controllers
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_called_once()
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_sushy(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(spec=[])
+ mock_storage.identity = "Storage 1"
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_called_once()
+
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_sushy_oem(
+ self, mock_get_system):
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.side_effect =\
+ sushy.exceptions.ExtensionError
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
@mock.patch.object(deploy_utils, 'get_async_step_return_state',
autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_post_delete_configuration_foreign_async(
self, mock_get_system, mock_build_agent_options,
- mock_get_async_step_return_state):
+ mock_get_async_step_return_state, mock_convert):
fake_oem_system = mock.Mock()
fake_system = mock.Mock()
fake_system.get_oem_extension.return_value = fake_oem_system
@@ -2497,9 +2629,13 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
mock_get_async_step_return_state.assert_called_once_with(task.node)
mock_task_mon1.wait.assert_not_called()
mock_task_mon2.wait.assert_not_called()
+ mock_convert.assert_not_called()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
- def test_post_delete_configuration_foreign_sync(self, mock_get_system):
+ def test_post_delete_configuration_foreign_sync(
+ self, mock_get_system, mock_convert):
fake_oem_system = mock.Mock()
fake_system = mock.Mock()
fake_system.get_oem_extension.return_value = fake_oem_system
@@ -2520,15 +2656,34 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
mock_task_mon2.get_task.return_value = mock_task2
fake_oem_system.clear_foreign_config.return_value = [
mock_task_mon1, mock_task_mon2]
+ mock_convert_state = mock.Mock()
+ mock_convert.return_value = mock_convert_state
result = self.raid.post_delete_configuration(
task, None, return_state=mock_return_state1)
- self.assertEqual(result, mock_return_state1)
+ self.assertEqual(result, mock_convert_state)
fake_oem_system.clear_foreign_config.assert_called_once()
mock_task_mon1.wait.assert_called_once_with(CONF.drac.raid_job_timeout)
mock_task_mon2.wait.assert_not_called()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_clear_foreign_config', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_post_delete_configuration_no_subtasks(
+ self, mock_get_system, mock_foreign, mock_convert):
+ mock_foreign.return_value = False
+ mock_convert.return_value = None
+ task = mock.Mock(node=self.node, context=self.context)
+ mock_return_state1 = mock.Mock()
+
+ result = self.raid.post_delete_configuration(
+ task, None, return_state=mock_return_state1)
+
+ self.assertEqual(mock_return_state1, result)
+
@mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
def test__clear_foreign_config_attribute_error(self, mock_log):
fake_oem_system = mock.Mock(spec=[])
@@ -2682,6 +2837,41 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
task.node.driver_internal_info.get('raid_task_monitor_uris'))
self.raid._set_failed.assert_called_once()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ def test__check_raid_tasks_status_convert_controller(
+ self, mock_get_task_monitor, mock_convert):
+ driver_internal_info = {
+ 'raid_task_monitor_uris': '/TaskService/1',
+ 'raid_config_substep': 'clear_foreign_config'}
+ self.node.driver_internal_info = driver_internal_info
+ self.node.save()
+
+ mock_config_task = mock.Mock()
+ mock_config_task.task_state = sushy.TASK_STATE_COMPLETED
+ mock_config_task.task_status = sushy.HEALTH_OK
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.get_task.return_value = mock_config_task
+ mock_get_task_monitor.return_value = mock_task_monitor
+
+ self.raid._set_success = mock.Mock()
+ self.raid._set_failed = mock.Mock()
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.raid._check_raid_tasks_status(
+ task, ['/TaskService/1'])
+
+ mock_convert.assert_called_once_with(task)
+ self.raid._set_success.assert_not_called()
+ self.raid._set_failed.assert_not_called()
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_config_substep'))
+
@mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
autospec=True)
@mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
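Taken together, the _convert_controller_to_raid_mode tests above describe the post-cleanup step added to DracRedfishRAID: walk the system's storage controllers, ask the Dell OEM extension to convert each one back to RAID mode, record the returned task-monitor URIs in driver_internal_info, and reboot to let the conversion run; the "old iDRAC", "old sushy" and "not RAID" variants simply skip the step, at most logging a warning. A rough sketch of that control flow, assuming the sushy OEM interface as it is mocked in these tests rather than the actual driver code:

    from sushy import exceptions as sushy_exc

    from ironic.drivers.modules import deploy_utils

    def convert_controllers_to_raid_mode(task, system):
        # Illustrative only - mirrors what the tests assert, not the driver.
        monitor_uris = []
        for storage in system.storage.get_members():
            try:
                controllers = storage.controllers.get_members()
            except (sushy_exc.MissingAttributeError, AttributeError):
                # Older iDRAC firmware or an older sushy release: nothing
                # to convert for this storage entry.
                continue
            for controller in controllers:
                try:
                    oem = controller.get_oem_extension('Dell')
                except sushy_exc.ExtensionError:
                    # sushy-oem-idrac too old to offer the conversion call.
                    continue
                monitor = oem.convert_to_raid()
                if monitor:
                    monitor_uris.append(monitor.task_monitor_uri)
        if monitor_uris:
            info = task.node.driver_internal_info
            info['raid_task_monitor_uris'] = monitor_uris
            task.node.driver_internal_info = info
            task.node.save()
            return deploy_utils.reboot_to_finish_step(task)
        return None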
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
index 128f603c5..8aa6f78da 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
@@ -116,9 +116,9 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
self.assertEqual(expected_driver_info, actual_driver_info)
def test_parse_driver_info_deploy_config(self):
- CONF.conductor.deploy_kernel = 'kernel'
- CONF.conductor.deploy_ramdisk = 'ramdisk'
- CONF.conductor.bootloader = 'bootloader'
+ CONF.set_override('deploy_kernel', 'kernel', 'conductor')
+ CONF.set_override('deploy_ramdisk', 'ramdisk', 'conductor')
+ CONF.set_override('bootloader', 'bootloader', 'conductor')
expected_driver_info = {'deploy_kernel': 'kernel',
'deploy_ramdisk': 'ramdisk',
'bootloader': 'bootloader',
@@ -128,10 +128,9 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
self.assertEqual(expected_driver_info, actual_driver_info)
def test_parse_driver_info_rescue_config(self):
- CONF.conductor.rescue_kernel = 'kernel'
- CONF.conductor.rescue_ramdisk = 'ramdisk'
- CONF.conductor.bootloader = 'bootloader'
-
+ CONF.set_override('rescue_kernel', 'kernel', 'conductor')
+ CONF.set_override('rescue_ramdisk', 'ramdisk', 'conductor')
+ CONF.set_override('bootloader', 'bootloader', 'conductor')
expected_driver_info = {'rescue_kernel': 'kernel',
'rescue_ramdisk': 'ramdisk',
'bootloader': 'bootloader',
@@ -141,9 +140,8 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
self.assertEqual(expected_driver_info, actual_driver_info)
def test_parse_driver_info_bootloader_none(self):
- CONF.conductor.deploy_kernel = 'kernel'
- CONF.conductor.deploy_ramdisk = 'ramdisk'
-
+ CONF.set_override('deploy_kernel', 'kernel', 'conductor')
+ CONF.set_override('deploy_ramdisk', 'ramdisk', 'conductor')
self.assertRaisesRegex(exception.MissingParameterValue, 'bootloader',
ilo_boot.parse_driver_info, self.node)
@@ -452,14 +450,14 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_glance(self, is_glance_image_mock,
- validate_href_mock,
- val_driver_info_mock):
+ def test_validate_ramdisk_deploy_glance(self, is_glance_image_mock,
+ validate_href_mock,
+ val_driver_info_mock):
instance_info = self.node.instance_info
boot_iso = '6b2f0c0c-79e8-4db6-842e-43c9764204af'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -475,14 +473,14 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_webserver(self, is_glance_image_mock,
- validate_href_mock,
- val_driver_info_mock):
+ def test_validate_ramdisk_deploy_webserver(self, is_glance_image_mock,
+ validate_href_mock,
+ val_driver_info_mock):
instance_info = self.node.instance_info
boot_iso = 'http://myserver/boot.iso'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -499,18 +497,18 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_webserver_exc(self,
- is_glance_image_mock,
- validate_href_mock,
- val_driver_info_mock,
- log_mock):
+ def test_validate_ramdisk_deploy_webserver_exc(self,
+ is_glance_image_mock,
+ validate_href_mock,
+ val_driver_info_mock,
+ log_mock):
instance_info = self.node.instance_info
validate_href_mock.side_effect = exception.ImageRefValidationFailed(
image_href='http://myserver/boot.iso', reason='fail')
boot_iso = 'http://myserver/boot.iso'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -523,7 +521,7 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
is_glance_image_mock.assert_called_once_with(boot_iso)
validate_href_mock.assert_called_once_with(mock.ANY, boot_iso)
self.assertFalse(val_driver_info_mock.called)
- self.assertIn("Virtual media deploy with 'ramdisk' boot_option "
+ self.assertIn("Virtual media deploy with 'ramdisk' deploy "
"accepts only Glance images or HTTP(S) URLs as "
"instance_info['boot_iso'].",
log_mock.call_args[0][0])
@@ -857,7 +855,7 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
- def _test_prepare_instance_whole_disk_image(
+ def test_prepare_instance_whole_disk_image(
self, cleanup_vmedia_boot_mock, set_boot_device_mock,
update_boot_mode_mock, update_secure_boot_mode_mock,
is_iscsi_boot_mock):
@@ -877,41 +875,31 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
self.assertIsNone(task.node.driver_internal_info.get(
'ilo_uefi_iscsi_boot'))
- def test_prepare_instance_whole_disk_image_local(self):
- self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
- self.node.save()
- self._test_prepare_instance_whole_disk_image()
-
- def test_prepare_instance_whole_disk_image(self):
- self._test_prepare_instance_whole_disk_image()
-
@mock.patch.object(deploy_utils, 'is_iscsi_boot',
spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
- @mock.patch.object(ilo_boot.IloVirtualMediaBoot,
- '_configure_vmedia_boot', spec_set=True,
+ @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
- self, cleanup_vmedia_boot_mock, configure_vmedia_mock,
+ self, cleanup_vmedia_boot_mock, set_boot_device_mock,
update_boot_mode_mock, update_secure_boot_mode_mock,
is_iscsi_boot_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': (
"12312642-09d3-467f-8e09-12385826a123")}
- self.node.instance_info = {
- 'capabilities': {'boot_option': 'netboot'}}
self.node.save()
is_iscsi_boot_mock.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
- configure_vmedia_mock.assert_called_once_with(
- mock.ANY, task, "12312642-09d3-467f-8e09-12385826a123")
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.DISK,
+ persistent=True)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task)
self.assertIsNone(task.node.driver_internal_info.get(
@@ -998,9 +986,7 @@ class IloVirtualMediaBootTestCase(test_common.BaseIloTest):
cleanup_vmedia_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
- task.node.instance_info = instance_info
+ task.node.deploy_interface = 'ramdisk'
task.node.save()
is_iscsi_boot_mock.return_value = False
url = 'http://myserver/boot.iso'
@@ -1377,7 +1363,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ilo5'],
enabled_boot_interfaces=['ilo-uefi-https'],
enabled_console_interfaces=['ilo'],
- enabled_deploy_interfaces=['direct'],
+ enabled_deploy_interfaces=['direct', 'ramdisk'],
enabled_inspect_interfaces=['ilo'],
enabled_management_interfaces=['ilo5'],
enabled_power_interfaces=['ilo'],
@@ -1653,16 +1639,16 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_glance(self, is_glance_image_mock,
- validate_href_mock,
- val_driver_info_mock,
- get_boot_mock):
+ def test_validate_ramdisk_deploy_glance(self, is_glance_image_mock,
+ validate_href_mock,
+ val_driver_info_mock,
+ get_boot_mock):
get_boot_mock.return_value = 'UEFI'
instance_info = self.node.instance_info
boot_iso = '6b2f0c0c-79e8-4db6-842e-43c9764204af'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -1680,16 +1666,16 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_webserver(self, is_glance_image_mock,
- validate_href_mock,
- val_driver_info_mock,
- get_boot_mock):
+ def test_validate_ramdisk_deploy_webserver(self, is_glance_image_mock,
+ validate_href_mock,
+ val_driver_info_mock,
+ get_boot_mock):
get_boot_mock.return_value = 'UEFI'
instance_info = self.node.instance_info
boot_iso = 'http://myserver/boot.iso'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -1708,7 +1694,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
- def test_validate_ramdisk_boot_option_webserver_exc(
+ def test_validate_ramdisk_deploy_webserver_exc(
self, is_glance_image_mock, validate_href_mock,
val_driver_info_mock, log_mock, get_boot_mock):
@@ -1718,8 +1704,8 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
image_href='http://myserver/boot.iso', reason='fail')
boot_iso = 'http://myserver/boot.iso'
instance_info['boot_iso'] = boot_iso
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
self.node.instance_info = instance_info
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -1732,7 +1718,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
is_glance_image_mock.assert_called_once_with(boot_iso)
validate_href_mock.assert_called_once_with(mock.ANY, boot_iso)
self.assertFalse(val_driver_info_mock.called)
- self.assertIn("UEFI-HTTPS boot with 'ramdisk' boot_option "
+ self.assertIn("UEFI-HTTPS boot with 'ramdisk' deploy "
"accepts only Glance images or HTTPS URLs as "
"instance_info['boot_iso'].",
log_mock.call_args[0][0])
@@ -1902,7 +1888,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
- def _test_prepare_instance_local_or_whole_disk_image(
+ def test_prepare_instance_local_or_whole_disk_image(
self, set_boot_device_mock,
parse_deploy_mock, prepare_iso_mock, setup_uefi_https_mock,
cleanup_iso_mock, update_secureboot_mock):
@@ -1919,16 +1905,6 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
prepare_iso_mock.assert_not_called()
setup_uefi_https_mock.assert_not_called()
- def test_prepare_instance_image_local(self):
- self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
- self.node.save()
- self._test_prepare_instance_local_or_whole_disk_image()
-
- def test_prepare_instance_whole_disk_image(self):
- self.node.driver_internal_info = {'is_whole_disk_image': True}
- self.node.save()
- self._test_prepare_instance_local_or_whole_disk_image()
-
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(image_utils, 'cleanup_iso_image', spec_set=True,
@@ -1937,41 +1913,30 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(image_utils, 'prepare_boot_iso',
spec_set=True, autospec=True)
- @mock.patch.object(ilo_boot.IloUefiHttpsBoot, '_parse_deploy_info',
- spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
self, set_boot_device_mock,
- parse_deploy_mock, prepare_iso_mock, setup_uefi_https_mock,
+ prepare_iso_mock, setup_uefi_https_mock,
cleanup_iso_mock, update_secureboot_mock):
- self.node.instance_info = {
- 'capabilities': '{"boot_option": "netboot"}'
- }
self.node.driver_internal_info = {
'root_uuid_or_disk_id': (
"12312642-09d3-467f-8e09-12385826a123")
}
self.node.driver_internal_info.update({'is_whole_disk_image': False})
self.node.save()
- d_info = {'a': 'x', 'b': 'y'}
- parse_deploy_mock.return_value = d_info
- prepare_iso_mock.return_value = "recreated-iso"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
cleanup_iso_mock.assert_called_once_with(task)
- set_boot_device_mock.assert_not_called()
- parse_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- prepare_iso_mock.assert_called_once_with(
- task, d_info, root_uuid='12312642-09d3-467f-8e09-12385826a123')
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.DISK,
+ persistent=True)
+ prepare_iso_mock.assert_not_called()
update_secureboot_mock.assert_called_once_with(task)
- setup_uefi_https_mock.assert_called_once_with(
- task, "recreated-iso", True)
- self.assertEqual(task.node.instance_info['boot_iso'],
- "recreated-iso")
+ setup_uefi_https_mock.assert_not_called()
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
spec_set=True, autospec=True)
@@ -1998,9 +1963,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = '{"boot_option": "ramdisk"}'
- task.node.instance_info = instance_info
+ task.node.deploy_interface = 'ramdisk'
task.node.save()
task.driver.boot.prepare_instance(task)
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py
index 605124b69..c3e22453f 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_common.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
@@ -30,6 +31,7 @@ from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import exception
+from ironic.common import image_service
from ironic.common import images
from ironic.common import swift
from ironic.conductor import task_manager
@@ -374,6 +376,22 @@ class IloCommonMethodsTestCase(BaseIloTest):
expected_info = dict(self.info, **ipmi_info)
self.assertEqual(expected_info, actual_info)
+ def test_update_redfish_properties(self):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ redfish_info = {
+ "redfish_address": "1.2.3.4",
+ "redfish_username": "admin",
+ "redfish_password": "fake",
+ "redfish_verify_ca": None,
+ "redfish_system_id": "/redfish/v1/Systems/1"
+ }
+ task.node.driver_info = self.info
+ ilo_common.update_redfish_properties(task)
+ actual_info = task.node.driver_info
+ expected_info = dict(self.info, **redfish_info)
+ self.assertEqual(expected_info, actual_info)
+
def test__get_floppy_image_name(self):
image_name_expected = 'image-' + self.node.uuid
image_name_actual = ilo_common._get_floppy_image_name(self.node)
@@ -434,8 +452,8 @@ class IloCommonMethodsTestCase(BaseIloTest):
tempfile_mock.return_value = mock_image_file_handle
self.config(use_web_server_for_images=True, group='ilo')
deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
- CONF.deploy.http_url = "http://abc.com/httpboot"
- CONF.deploy.http_root = "/httpboot"
+ CONF.set_override('http_url', 'http://abc.com/httpboot', 'deploy')
+ CONF.set_override('http_root', '/httpboot', 'deploy')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -676,8 +694,8 @@ class IloCommonMethodsTestCase(BaseIloTest):
swift_obj_mock = swift_api_mock.return_value
boot_iso = 'swift:object-name'
swift_obj_mock.get_temp_url.return_value = 'image_url'
- CONF.ilo.swift_ilo_container = 'ilo_cont'
- CONF.ilo.swift_object_expiry_timeout = 1
+ CONF.set_override('swift_ilo_container', 'ilo_cont', 'ilo')
+ CONF.set_override('swift_object_expiry_timeout', 1, 'ilo')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_common.setup_vmedia_for_boot(task, boot_iso)
@@ -704,7 +722,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
def test_cleanup_vmedia_boot(self, get_name_mock, swift_api_mock,
eject_mock):
swift_obj_mock = swift_api_mock.return_value
- CONF.ilo.swift_ilo_container = 'ilo_cont'
+ CONF.set_override('swift_ilo_container', 'ilo_cont', 'ilo')
get_name_mock.return_value = 'image-node-uuid'
@@ -727,7 +745,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
exc = exception.SwiftOperationError('error')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.delete_object.side_effect = exc
- CONF.ilo.swift_ilo_container = 'ilo_cont'
+ CONF.set_override('swift_ilo_container', 'ilo_cont', 'ilo')
get_name_mock.return_value = 'image-node-uuid'
@@ -752,7 +770,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
exc = exception.SwiftObjectNotFoundError('error')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.delete_object.side_effect = exc
- CONF.ilo.swift_ilo_container = 'ilo_cont'
+ CONF.set_override('swift_ilo_container', 'ilo_cont', 'ilo')
get_name_mock.return_value = 'image-node-uuid'
@@ -771,7 +789,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
def test_cleanup_vmedia_boot_for_webserver(self,
destroy_image_mock,
eject_mock):
- CONF.ilo.use_web_server_for_images = True
+ CONF.set_override('use_web_server_for_images', True, 'ilo')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -932,8 +950,8 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test_copy_image_to_web_server(self, copy_mock,
chmod_mock):
- CONF.deploy.http_url = "http://x.y.z.a/webserver/"
- CONF.deploy.http_root = "/webserver"
+ CONF.set_override('http_url', 'http://x.y.z.a/webserver/', 'deploy')
+ CONF.set_override('http_root', '/webserver', 'deploy')
expected_url = "http://x.y.z.a/webserver/image-UUID"
source = 'tmp_image_file'
destination = "image-UUID"
@@ -949,8 +967,8 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test_copy_image_to_web_server_fails(self, copy_mock,
chmod_mock):
- CONF.deploy.http_url = "http://x.y.z.a/webserver/"
- CONF.deploy.http_root = "/webserver"
+ CONF.set_override('http_url', 'http://x.y.z.a/webserver/', 'deploy')
+ CONF.set_override('http_root', '/webserver', 'deploy')
source = 'tmp_image_file'
destination = "image-UUID"
image_path = "/webserver/image-UUID"
@@ -965,8 +983,8 @@ class IloCommonMethodsTestCase(BaseIloTest):
@mock.patch.object(ilo_common, 'ironic_utils', autospec=True)
def test_remove_image_from_web_server(self, utils_mock):
# | GIVEN |
- CONF.deploy.http_url = "http://x.y.z.a/webserver/"
- CONF.deploy.http_root = "/webserver"
+ CONF.set_override('http_url', 'http://x.y.z.a/webserver/', 'deploy')
+ CONF.set_override('http_root', '/webserver', 'deploy')
object_name = 'tmp_image_file'
# | WHEN |
ilo_common.remove_image_from_web_server(object_name)
@@ -1076,7 +1094,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
def test_destroy_floppy_image_from_web_server(self, get_floppy_name_mock,
utils_mock):
get_floppy_name_mock.return_value = 'image-uuid'
- CONF.deploy.http_root = "/webserver/"
+ CONF.set_override('http_root', '/webserver/', 'deploy')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_common.destroy_floppy_image_from_web_server(task.node)
@@ -1220,7 +1238,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test__get_certificate_file_list_none(self, path_exists_mock):
cl = None
- CONF.webserver_verify_ca = '/file/path'
+ CONF.set_override('webserver_verify_ca', '/file/path')
path_exists_mock.return_value = True
expected = ['/file/path']
actual = ilo_common._get_certificate_file_list(cl)
@@ -1230,7 +1248,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test__get_certificate_file_list_empty(self, path_exists_mock):
cl = []
- CONF.webserver_verify_ca = '/file/path'
+ CONF.set_override('webserver_verify_ca', '/file/path')
path_exists_mock.return_value = True
expected = ['/file/path']
actual = ilo_common._get_certificate_file_list(cl)
@@ -1240,7 +1258,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test__get_certificate_file_list_empty_no_path(self, path_exists_mock):
cl = []
- CONF.webserver_verify_ca = '/file/path'
+ CONF.set_override('webserver_verify_ca', '/file/path')
path_exists_mock.return_value = False
expected = []
actual = ilo_common._get_certificate_file_list(cl)
@@ -1248,14 +1266,14 @@ class IloCommonMethodsTestCase(BaseIloTest):
def test__get_certificate_file_list(self):
cl = ['file/path/a', 'file/path/b']
- CONF.webserver_verify_ca = '/file/path/c'
+ CONF.set_override('webserver_verify_ca', '/file/path/c')
expected = cl
actual = ilo_common._get_certificate_file_list(cl)
self.assertEqual(expected, actual)
def test__get_certificate_file_list_string_type(self):
cl = 'file/path/a'
- CONF.webserver_verify_ca = '/file/path/c'
+ CONF.set_override('webserver_verify_ca', '/file/path/c')
self.assertRaisesRegex(exception.InvalidParameterValue,
"List of files is .* \"<class 'str'>\" .*",
ilo_common._get_certificate_file_list, cl)
@@ -1355,7 +1373,7 @@ class IloCommonMethodsTestCase(BaseIloTest):
autospec=True)
def test_add_certificates_raises_ilo_error(self, get_ilo_object_mock,
get_cl_mock):
- CONF.webserver_verify_ca = False
+ CONF.set_override('webserver_verify_ca', False)
ilo_mock_object = get_ilo_object_mock.return_value
c_l = ['/file/path/a', '/file/path/b']
get_cl_mock.return_value = c_l
@@ -1504,3 +1522,37 @@ class IloCommonMethodsTestCase(BaseIloTest):
self.assertRaises(exception.IloOperationError,
ilo_common.setup_uefi_https,
task, iso, True)
+
+ @mock.patch.object(image_service, 'FileImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(builtins, 'open', autospec=True)
+ def test_download_file_url(self, open_mock, http_mock, file_mock):
+ url = "file:///test1/iLO.crt"
+ target_file = "/a/b/c"
+ fd_mock = mock.MagicMock(spec=io.BytesIO)
+ open_mock.return_value = fd_mock
+ fd_mock.__enter__.return_value = fd_mock
+ ilo_common.download(target_file, url)
+ open_mock.assert_called_once_with(target_file, 'wb')
+ http_mock.assert_not_called()
+ file_mock.return_value.download.assert_called_once_with(
+ "/test1/iLO.crt", fd_mock)
+
+ @mock.patch.object(image_service, 'FileImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(builtins, 'open', autospec=True)
+ def test_download_http_url(self, open_mock, http_mock, file_mock):
+ url = "http://1.1.1.1/iLO.crt"
+ target_file = "/a/b/c"
+ fd_mock = mock.MagicMock(spec=io.BytesIO)
+ open_mock.return_value = fd_mock
+ fd_mock.__enter__.return_value = fd_mock
+ ilo_common.download(target_file, url)
+ http_mock.return_value.download.assert_called_once_with(
+ "http://1.1.1.1/iLO.crt", fd_mock)
+ file_mock.assert_not_called()
+ open_mock.assert_called_once_with(target_file, 'wb')
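
The two download tests above only pin down dispatch behaviour. A minimal sketch of that behaviour follows; the function name and the service arguments are stand-ins for the mocked FileImageService/HttpImageService and do not reproduce the actual ilo_common.download signature.

from urllib.parse import urlparse


def download_sketch(target_file, url, http_service, file_service):
    """Fetch ``url`` into ``target_file`` the way the tests above expect."""
    parsed = urlparse(url)
    with open(target_file, 'wb') as fd:
        if parsed.scheme == 'file':
            # file:///test1/iLO.crt is passed on as /test1/iLO.crt
            file_service.download(parsed.path, fd)
        else:
            http_service.download(url, fd)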
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_management.py b/ironic/tests/unit/drivers/modules/ilo/test_management.py
index e4d891c3d..f087c4d58 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_management.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_management.py
@@ -14,9 +14,12 @@
"""Test class for Management Interface used by iLO modules."""
+import os
+import shutil
from unittest import mock
import ddt
+from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
@@ -42,6 +45,8 @@ ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
+CONF = cfg.CONF
+
@ddt.ddt
class IloManagementTestCase(test_common.BaseIloTest):
@@ -424,6 +429,116 @@ class IloManagementTestCase(test_common.BaseIloTest):
step_mock.assert_called_once_with(
task.node, 'update_authentication_failure_logging', '1', False)
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ def test_create_csr(self, os_mock, step_mock):
+ csr_params_args = {
+ "City": "Bangalore",
+ "CommonName": "1.1.1.1",
+ "Country": "ABC",
+ "OrgName": "DEF",
+ "State": "IJK"
+ }
+ csr_args = {
+ "csr_params": csr_params_args}
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.management.create_csr(task, **csr_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ step_mock.assert_called_once_with(task.node, 'create_csr',
+ cert_path, csr_params_args)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ def test_add_https_certificate(self, shutil_mock, os_mock,
+ step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': '/test1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ filename = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', filename)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_called_once_with('/test1/cert', filename)
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_fileurl(self, download_mock, shutil_mock,
+ os_mock, step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'file:///test1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ fname = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', fname)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_called_once_with(fname, 'file:///test1/cert')
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_httpurl(self, download_mock, shutil_mock,
+ os_mock, step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'http://1.1.1.1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ fname = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', fname)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_called_once_with(fname, 'http://1.1.1.1/cert')
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_url_exception(self, download_mock,
+ shutil_mock, os_mock,
+ step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'swift://1.1.1.1/cert'}
+ self.assertRaises(exception.IloOperationNotSupported,
+ task.driver.management.add_https_certificate,
+ task,
+ **cert_file_args)
+
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ step_mock.assert_not_called()
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_not_called()
+
@mock.patch.object(deploy_utils, 'build_agent_options',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot.IloVirtualMediaBoot, 'clean_up_ramdisk',
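
Read together, the new create_csr and add_https_certificate tests describe a small staging convention for per-node certificates. The sketch below is a reconstruction under those assumptions only; stage_certificate is a hypothetical helper name, and the real management code raises IloOperationNotSupported rather than ValueError.

import os
import shutil
from urllib.parse import urlparse


def stage_certificate(cert_root, node_uuid, cert_file, download):
    """Place a node's certificate at <cert_root>/<uuid>/<uuid>.crt."""
    cert_dir = os.path.join(cert_root, node_uuid)
    os.makedirs(cert_dir, 0o755)
    filename = os.path.join(cert_dir, node_uuid) + '.crt'
    scheme = urlparse(cert_file).scheme
    if scheme in ('file', 'http', 'https'):
        download(filename, cert_file)
    elif scheme == '':
        # A plain local path is copied as-is.
        shutil.copy(cert_file, filename)
    else:
        # e.g. swift://; the tests expect an "operation not supported" error.
        raise ValueError('unsupported certificate source: %s' % cert_file)
    return filename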
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
index f3114826e..b7bc3cbce 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
@@ -30,6 +31,7 @@ from ironic.tests.unit.drivers.modules.ilo import test_common
class VendorPassthruTestCase(test_common.BaseIloTest):
boot_interface = 'ilo-virtual-media'
+ vendor_interface = 'ilo'
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@@ -95,3 +97,72 @@ class VendorPassthruTestCase(test_common.BaseIloTest):
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, 'foo')
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test__validate_is_it_a_supported_system(
+ self, get_ilo_object_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.maintenance = True
+ ilo_mock_object = get_ilo_object_mock.return_value
+ ilo_mock_object.get_product_name.return_value = (
+ 'ProLiant DL380 Gen10')
+ task.driver.vendor._validate_is_it_a_supported_system(task)
+ get_ilo_object_mock.assert_called_once_with(task.node)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test__validate_is_it_a_supported_system_exception(
+ self, get_ilo_object_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.maintenance = True
+ ilo_mock_object = get_ilo_object_mock.return_value
+ ilo_mock_object.get_product_name.return_value = (
+ 'ProLiant DL380 Gen8')
+ self.assertRaises(
+ exception.IloOperationNotSupported,
+ task.driver.vendor._validate_is_it_a_supported_system, task)
+
+ @mock.patch.object(ilo_common, 'parse_driver_info',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_redfish_properties',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_vendor.VendorPassthru,
+ '_validate_is_it_a_supported_system',
+ spec_set=True, autospec=True)
+ def test_validate_create_subscription(self, validate_redfish_system_mock,
+ redfish_properties_mock,
+ driver_info_mock):
+ self.node.vendor_interface = 'ilo'
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ d_info = {'ilo_address': '1.1.1.1',
+ 'ilo_username': 'user',
+ 'ilo_password': 'password',
+ 'ilo_verify_ca': False}
+ driver_info_mock.return_value = d_info
+ redfish_properties = {'redfish_address': '1.1.1.1',
+ 'redfish_username': 'user',
+ 'redfish_password': 'password',
+ 'redfish_system_id': '/redfish/v1/Systems/1',
+ 'redfish_verify_ca': False}
+ redfish_properties_mock.return_value = redfish_properties
+            kwargs = {'Destination': 'https://someurl',
+ 'Context': 'MyProtocol'}
+ task.driver.vendor.validate(task, 'create_subscription', **kwargs)
+ driver_info_mock.assert_called_once_with(task.node)
+ redfish_properties_mock.assert_called_once_with(task)
+ validate_redfish_system_mock.assert_called_once_with(
+ task.driver.vendor, task)
+
+    def test_validate_operation_exception(self):
+ self.node.vendor_interface = 'ilo'
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(
+ exception.IloOperationNotSupported,
+ task.driver.vendor.validate, task, 'eject_vmedia')
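
The two _validate_is_it_a_supported_system tests fix only two data points: 'ProLiant DL380 Gen10' passes while 'ProLiant DL380 Gen8' raises IloOperationNotSupported. A hedged sketch of such a generation gate is shown below; the actual driver check, and the minimum generation it enforces, may differ.

import re


def is_supported_proliant(product_name, minimum_gen=10):
    """Return True when the reported ProLiant generation is new enough."""
    match = re.search(r'Gen(\d+)', product_name)
    return bool(match) and int(match.group(1)) >= minimum_gen


assert is_supported_proliant('ProLiant DL380 Gen10')
assert not is_supported_proliant('ProLiant DL380 Gen8')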
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_boot.py b/ironic/tests/unit/drivers/modules/irmc/test_boot.py
index 1822b9965..6b57c7504 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_boot.py
@@ -63,6 +63,7 @@ PARSED_IFNO = {
'irmc_snmp_port': 161,
'irmc_snmp_version': snmp.SNMP_V2C,
'irmc_sensor_method': 'ipmitool',
+ 'irmc_verify_ca': True,
}
@@ -74,13 +75,14 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def setUp(self):
super(IRMCDeployPrivateMethodsTestCase, self).setUp()
- CONF.irmc.remote_image_share_root = '/remote_image_share_root'
- CONF.irmc.remote_image_server = '10.20.30.40'
- CONF.irmc.remote_image_share_type = 'NFS'
- CONF.irmc.remote_image_share_name = 'share'
- CONF.irmc.remote_image_user_name = 'admin'
- CONF.irmc.remote_image_user_password = 'admin0'
- CONF.irmc.remote_image_user_domain = 'local'
+ CONF.set_override('remote_image_share_root',
+ '/remote_image_share_root', 'irmc')
+ CONF.set_override('remote_image_server', '10.20.30.40', 'irmc')
+ CONF.set_override('remote_image_share_type', 'NFS', 'irmc')
+ CONF.set_override('remote_image_share_name', 'share', 'irmc')
+ CONF.set_override('remote_image_user_name', 'admin', 'irmc')
+ CONF.set_override('remote_image_user_password', 'admin0', 'irmc')
+ CONF.set_override('remote_image_user_domain', 'local', 'irmc')
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
def test__parse_config_option(self, isdir_mock,
@@ -95,7 +97,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
def test__parse_config_option_non_existed_root(
self, isdir_mock, check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/non_existed_root'
+ CONF.set_override('remote_image_share_root', '/non_existed_root',
+ 'irmc')
isdir_mock.return_value = False
self.assertRaises(exception.InvalidParameterValue,
@@ -248,7 +251,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__parse_instance_info_with_boot_iso_file_name_ok(
self, check_share_fs_mounted_mock):
"""With optional 'boot_iso' file name."""
- CONF.irmc.remote_image_share_root = '/etc'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
self.node.instance_info['boot_iso'] = 'hosts'
instance_info_expected = {'boot_iso': 'hosts'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
@@ -258,7 +261,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__parse_instance_info_with_boot_iso_deprecated(
self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' file name."""
- CONF.irmc.remote_image_share_root = '/etc'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
self.node.instance_info['irmc_boot_iso'] = 'hosts'
instance_info_expected = {'boot_iso': 'hosts'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
@@ -268,7 +271,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__parse_instance_info_without_boot_iso_ok(
self, check_share_fs_mounted_mock):
"""With optional no 'boot_iso' file name."""
- CONF.irmc.remote_image_share_root = '/etc'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
self.node.instance_info['boot_iso'] = None
instance_info_expected = {}
@@ -335,7 +338,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
def test__parse_instance_info_with_boot_iso_invalid(
self, isfile_mock, check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/etc'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
isfile_mock.return_value = False
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -357,7 +360,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test_parse_deploy_info_ok(self, mock_isfile,
get_image_instance_info_mock,
check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/etc'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
get_image_instance_info_mock.return_value = {'a': 'b'}
driver_info_expected = {
'a': 'b',
@@ -443,7 +446,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
setup_vmedia_mock,
set_boot_device_mock,
check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/'
+ CONF.set_override('remote_image_share_root', '/', 'irmc')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -476,7 +479,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
setup_vmedia_mock,
set_boot_device_mock,
check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/'
+ CONF.set_override('remote_image_share_root', '/', 'irmc')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -550,7 +553,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
boot_mode_mock,
create_boot_iso_mock,
check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/'
+ CONF.set_override('remote_image_share_root', '/', 'irmc')
image = '733d1c44-a2ea-414b-aca7-69decf20d810'
is_image_href_ordinary_file_name_mock.return_value = False
deploy_info_mock.return_value = {'boot_iso': image}
@@ -828,7 +831,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
autospec=True)
def test__remove_share_file(self, unlink_without_raise_mock,
check_share_fs_mounted_mock):
- CONF.irmc.remote_image_share_root = '/share'
+ CONF.set_override('remote_image_share_root', '/share', 'irmc')
irmc_boot._remove_share_file("boot.iso")
@@ -844,12 +847,12 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
cd_set_params = (irmc_boot.scci
.get_virtual_cd_set_params_cmd.return_value)
- CONF.irmc.remote_image_server = '10.20.30.40'
- CONF.irmc.remote_image_user_domain = 'local'
- CONF.irmc.remote_image_share_type = 'NFS'
- CONF.irmc.remote_image_share_name = 'share'
- CONF.irmc.remote_image_user_name = 'admin'
- CONF.irmc.remote_image_user_password = 'admin0'
+ CONF.set_override('remote_image_server', '10.20.30.40', 'irmc')
+ CONF.set_override('remote_image_share_type', 'NFS', 'irmc')
+ CONF.set_override('remote_image_share_name', 'share', 'irmc')
+ CONF.set_override('remote_image_user_name', 'admin', 'irmc')
+ CONF.set_override('remote_image_user_password', 'admin0', 'irmc')
+ CONF.set_override('remote_image_user_domain', 'local', 'irmc')
irmc_boot.scci.get_share_type.return_value = 0
@@ -926,12 +929,12 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
fd_set_params = (irmc_boot.scci
.get_virtual_fd_set_params_cmd.return_value)
- CONF.irmc.remote_image_server = '10.20.30.40'
- CONF.irmc.remote_image_user_domain = 'local'
- CONF.irmc.remote_image_share_type = 'NFS'
- CONF.irmc.remote_image_share_name = 'share'
- CONF.irmc.remote_image_user_name = 'admin'
- CONF.irmc.remote_image_user_password = 'admin0'
+ CONF.set_override('remote_image_server', '10.20.30.40', 'irmc')
+ CONF.set_override('remote_image_share_type', 'NFS', 'irmc')
+ CONF.set_override('remote_image_share_name', 'share', 'irmc')
+ CONF.set_override('remote_image_user_name', 'admin', 'irmc')
+ CONF.set_override('remote_image_user_password', 'admin0', 'irmc')
+ CONF.set_override('remote_image_user_domain', 'local', 'irmc')
irmc_boot.scci.get_share_type.return_value = 0
@@ -1009,8 +1012,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
# irmc_boot.check_share_fs_mounted is mocked in
# third_party_driver_mocks.py.
# irmc_boot.check_share_fs_mounted_orig is the real function.
- CONF.irmc.remote_image_share_root = '/'
- CONF.irmc.remote_image_share_type = 'nfs'
+ CONF.set_override('remote_image_share_root', '/', 'irmc')
+ CONF.set_override('remote_image_share_type', 'nfs', 'irmc')
result = irmc_boot.check_share_fs_mounted_orig()
parse_conf_mock.assert_called_once_with()
@@ -1026,8 +1029,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
# irmc_boot.check_share_fs_mounted is mocked in
# third_party_driver_mocks.py.
# irmc_boot.check_share_fs_mounted_orig is the real function.
- CONF.irmc.remote_image_share_root = '/etc'
- CONF.irmc.remote_image_share_type = 'cifs'
+ CONF.set_override('remote_image_share_root', '/etc', 'irmc')
+ CONF.set_override('remote_image_share_type', 'cifs', 'irmc')
self.assertRaises(exception.IRMCSharedFileSystemNotMounted,
irmc_boot.check_share_fs_mounted_orig)
@@ -1144,8 +1147,9 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
- def _test_prepare_instance_whole_disk_image(
- self, _cleanup_vmedia_boot_mock, set_boot_device_mock):
+ def test_prepare_instance_whole_disk_image(
+ self, _cleanup_vmedia_boot_mock, set_boot_device_mock,
+ check_share_fs_mounted_mock):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
@@ -1157,26 +1161,13 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
boot_devices.DISK,
persistent=True)
- def test_prepare_instance_whole_disk_image_local(
- self, check_share_fs_mounted_mock):
- self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
- self.node.save()
- self._test_prepare_instance_whole_disk_image()
-
- def test_prepare_instance_whole_disk_image(self,
- check_share_fs_mounted_mock):
- self._test_prepare_instance_whole_disk_image()
-
- @mock.patch.object(irmc_boot.IRMCVirtualMediaBoot,
- '_configure_vmedia_boot', spec_set=True,
+ @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
- self, _cleanup_vmedia_boot_mock, _configure_vmedia_mock,
+ self, _cleanup_vmedia_boot_mock, set_boot_device_mock,
check_share_fs_mounted_mock):
- self.node.instance_info = {
- 'capabilities': {'boot_option': 'netboot'}}
self.node.driver_internal_info = {'root_uuid_or_disk_id': "some_uuid"}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
@@ -1184,8 +1175,9 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
task.driver.boot.prepare_instance(task)
_cleanup_vmedia_boot_mock.assert_called_once_with(task)
- _configure_vmedia_mock.assert_called_once_with(mock.ANY, task,
- "some_uuid")
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.DISK,
+ persistent=True)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
@@ -1253,9 +1245,10 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
+ self.node.deploy_interface = 'ramdisk'
self.node.instance_info = {
'capabilities': {
- "secure_boot": "true", 'boot_option': 'netboot'
+ "secure_boot": "true"
}
}
self.node.save()
@@ -1281,9 +1274,10 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
+ self.node.deploy_interface = 'ramdisk'
self.node.instance_info = {
'capabilities': {
- "secure_boot": "false", 'boot_option': 'netboot'
+ "secure_boot": "false"
}
}
self.node.save()
@@ -1308,11 +1302,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
- self.node.instance_info = {
- 'capabilities': {
- 'boot_option': 'netboot'
- }
- }
+ self.node.deploy_interface = 'ramdisk'
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
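
With the netboot capability removed, the reworked iRMC boot tests assert a much simpler prepare_instance flow. The sketch below restates that flow under explicit assumptions: the 'ramdisk' branch is only inferred from the deploy_interface switch in the secure-boot tests, the helpers are passed in as stand-ins for the mocks above, and the real method carries considerably more state handling.

def prepare_instance_sketch(task, cleanup_vmedia_boot, configure_vmedia_boot,
                            node_set_boot_device, disk_device='disk'):
    if task.node.deploy_interface == 'ramdisk':
        # Ramdisk deploys are assumed to keep booting over virtual media.
        configure_vmedia_boot(task)
        return
    # Whole-disk and partition images alike now boot locally: tear down the
    # virtual media attachment and persistently select the disk.
    cleanup_vmedia_boot(task)
    node_set_boot_device(task, disk_device, persistent=True)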
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_common.py b/ironic/tests/unit/drivers/modules/irmc/test_common.py
index 7598fc16b..9dbb380ba 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_common.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_common.py
@@ -16,6 +16,7 @@
Test class for common methods used by iRMC modules.
"""
+import os
from unittest import mock
from oslo_config import cfg
@@ -71,6 +72,7 @@ class IRMCValidateParametersTestCase(BaseIRMCTest):
self.assertEqual(snmp.SNMP_V2C, info['irmc_snmp_version'])
self.assertEqual(161, info['irmc_snmp_port'])
self.assertEqual('public', info['irmc_snmp_community'])
+ self.assertTrue(info['irmc_verify_ca'])
def test_parse_driver_info_snmpv3(self):
self.node.driver_info['irmc_snmp_version'] = 'v3'
@@ -111,6 +113,7 @@ class IRMCValidateParametersTestCase(BaseIRMCTest):
self.assertEqual(443, info['irmc_port'])
self.assertEqual(60, info['irmc_client_timeout'])
self.assertEqual('ipmitool', info['irmc_sensor_method'])
+ self.assertEqual(True, info['irmc_verify_ca'])
def test_parse_driver_info_missing_address(self):
del self.node.driver_info['irmc_address']
@@ -274,6 +277,41 @@ class IRMCValidateParametersTestCase(BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
+ @mock.patch.object(os.path, 'isabs', return_value=True, autospec=True)
+ @mock.patch.object(os.path, 'isdir', return_value=True, autospec=True)
+ def test_parse_driver_info_dir_path_verify_ca(self, mock_isdir,
+ mock_isabs):
+ fake_path = 'absolute/path/to/a/valid/CA'
+ self.node.driver_info['irmc_verify_ca'] = fake_path
+ info = irmc_common.parse_driver_info(self.node)
+ self.assertEqual(fake_path, info['irmc_verify_ca'])
+ mock_isdir.assert_called_once_with(fake_path)
+ mock_isabs.assert_called_once_with(fake_path)
+
+ @mock.patch.object(os.path, 'isabs', return_value=True, autospec=True)
+ @mock.patch.object(os.path, 'isfile', return_value=True, autospec=True)
+ def test_parse_driver_info_file_path_verify_ca(self, mock_isfile,
+ mock_isabs):
+ fake_path = 'absolute/path/to/a/valid/ca.pem'
+ self.node.driver_info['irmc_verify_ca'] = fake_path
+ info = irmc_common.parse_driver_info(self.node)
+ self.assertEqual(fake_path, info['irmc_verify_ca'])
+ mock_isfile.assert_called_once_with(fake_path)
+ mock_isabs.assert_called_once_with(fake_path)
+
+ def test_parse_driver_info_string_bool_verify_ca(self):
+ self.node.driver_info['irmc_verify_ca'] = "False"
+ info = irmc_common.parse_driver_info(self.node)
+ self.assertFalse(info['irmc_verify_ca'])
+
+ def test_parse_driver_info_invalid_verify_ca(self):
+ self.node.driver_info['irmc_verify_ca'] = "1234"
+ self.assertRaises(exception.InvalidParameterValue,
+ irmc_common.parse_driver_info, self.node)
+ self.node.driver_info['irmc_verify_ca'] = 1234
+ self.assertRaises(exception.InvalidParameterValue,
+ irmc_common.parse_driver_info, self.node)
+
class IRMCCommonMethodsTestCase(BaseIRMCTest):
@@ -283,6 +321,7 @@ class IRMCCommonMethodsTestCase(BaseIRMCTest):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
+ self.info['irmc_verify_ca'] = True
mock_scci.get_client.return_value = 'get_client'
returned_mock_scci_get_client = irmc_common.get_irmc_client(self.node)
mock_scci.get_client.assert_called_with(
@@ -291,6 +330,7 @@ class IRMCCommonMethodsTestCase(BaseIRMCTest):
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
+ verify=self.info['irmc_verify_ca'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_client', returned_mock_scci_get_client)
@@ -314,6 +354,7 @@ class IRMCCommonMethodsTestCase(BaseIRMCTest):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
+ self.info['irmc_verify_ca'] = True
mock_scci.get_report.return_value = 'get_report'
returned_mock_scci_get_report = irmc_common.get_irmc_report(self.node)
mock_scci.get_report.assert_called_with(
@@ -322,6 +363,7 @@ class IRMCCommonMethodsTestCase(BaseIRMCTest):
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
+ verify=self.info['irmc_verify_ca'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_report', returned_mock_scci_get_report)
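
The new irmc_verify_ca tests outline the accepted value space without showing the implementation. The following is a hedged reconstruction of those rules only; the real parse_driver_info() raises ironic's InvalidParameterValue rather than the plain ValueError used here, and may apply additional checks.

import os

from oslo_utils import strutils


def parse_verify_ca(value):
    """Accept booleans, boolean strings, or an absolute CA file/dir path."""
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        if os.path.isabs(value) and (os.path.isdir(value)
                                     or os.path.isfile(value)):
            return value
        try:
            # "True"/"False" (and similar) are coerced; other strings raise.
            return strutils.bool_from_string(value, strict=True)
        except ValueError:
            raise ValueError('irmc_verify_ca must be a boolean, a boolean '
                             'string or an absolute path: %r' % value)
    raise ValueError('irmc_verify_ca must be a boolean, a boolean string or '
                     'an absolute path: %r' % value)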
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_raid.py b/ironic/tests/unit/drivers/modules/irmc/test_raid.py
index eefe7ff3a..54a0a0dd6 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_raid.py
@@ -702,8 +702,8 @@ class IRMCRaidConfigurationInternalMethodsTestCase(test_common.BaseIRMCTest):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
raid._commit_raid_config(task)
- get_raid_adapter_mock.assert_called_once_with(
- irmc_common.parse_driver_info(task.node))
+ irmc_info = irmc_common.parse_driver_info(task.node)
+ get_raid_adapter_mock.assert_called_once_with(irmc_info)
update_raid_info_mock.assert_called_once_with(
task.node, task.node.raid_config)
set_async_step_flags_mock.assert_called_once_with(
diff --git a/ironic/tests/unit/drivers/modules/network/test_neutron.py b/ironic/tests/unit/drivers/modules/network/test_neutron.py
index 0b70446ce..665a674a6 100644
--- a/ironic/tests/unit/drivers/modules/network/test_neutron.py
+++ b/ironic/tests/unit/drivers/modules/network/test_neutron.py
@@ -19,7 +19,6 @@ from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import neutron as neutron_common
-from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base as drivers_base
from ironic.drivers.modules.network import neutron
@@ -106,65 +105,6 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
context=task.context)],
validate_mock.call_args_list)
- @mock.patch.object(neutron_common, 'validate_network', autospec=True)
- def test_validate_boot_option_netboot(self, validate_mock):
- driver_internal_info = self.node.driver_internal_info
- driver_internal_info['is_whole_disk_image'] = True
- self.node.driver_internal_info = driver_internal_info
- boot_option = {'capabilities': '{"boot_option": "netboot"}'}
- self.node.instance_info = boot_option
- self.node.provision_state = states.DEPLOYING
- self.node.save()
- with task_manager.acquire(self.context, self.node.id) as task:
- self.assertRaisesRegex(
- exception.InvalidParameterValue,
- 'cannot perform "local" boot for whole disk image',
- self.interface.validate, task)
- self.assertEqual([mock.call(CONF.neutron.cleaning_network,
- 'cleaning network',
- context=task.context),
- mock.call(CONF.neutron.provisioning_network,
- 'provisioning network',
- context=task.context)],
- validate_mock.call_args_list)
-
- @mock.patch.object(neutron_common, 'validate_network', autospec=True)
- def test_validate_boot_option_netboot_no_exc(self, validate_mock):
- CONF.set_override('default_boot_option', 'netboot', 'deploy')
- driver_internal_info = self.node.driver_internal_info
- driver_internal_info['is_whole_disk_image'] = True
- self.node.driver_internal_info = driver_internal_info
- self.node.provision_state = states.AVAILABLE
- self.node.save()
- with task_manager.acquire(self.context, self.node.id) as task:
- self.interface.validate(task)
- self.assertEqual([mock.call(CONF.neutron.cleaning_network,
- 'cleaning network',
- context=task.context),
- mock.call(CONF.neutron.provisioning_network,
- 'provisioning network',
- context=task.context)],
- validate_mock.call_args_list)
-
- @mock.patch.object(neutron_common, 'validate_network', autospec=True)
- def test_validate_boot_option_local(self, validate_mock):
- driver_internal_info = self.node.driver_internal_info
- driver_internal_info['is_whole_disk_image'] = True
- self.node.driver_internal_info = driver_internal_info
- boot_option = {'capabilities': '{"boot_option": "local"}'}
- self.node.instance_info = boot_option
- self.node.provision_state = states.DEPLOYING
- self.node.save()
- with task_manager.acquire(self.context, self.node.id) as task:
- self.interface.validate(task)
- self.assertEqual([mock.call(CONF.neutron.cleaning_network,
- 'cleaning network',
- context=task.context),
- mock.call(CONF.neutron.provisioning_network,
- 'provisioning network',
- context=task.context)],
- validate_mock.call_args_list)
-
@mock.patch.object(neutron_common, 'validate_network',
side_effect=lambda n, t, context=None: n, autospec=True)
@mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_bios.py b/ironic/tests/unit/drivers/modules/redfish/test_bios.py
index cd6f9be5f..2ff3235fd 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_bios.py
@@ -165,13 +165,16 @@ class RedfishBiosTestCase(db_base.DbTestCase):
mock_setting_list.delete.assert_called_once_with(
task.context, task.node.id, delete_names)
+ @mock.patch.object(manager_utils, 'is_fast_track', autospec=True)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
def _test_step_pre_reboot(self, mock_power_action, mock_get_system,
- mock_build_agent_options, mock_prepare):
+ mock_build_agent_options, mock_prepare,
+ mock_fast_track, fast_track=False):
+ mock_fast_track.return_value = fast_track
if self.node.clean_step:
step_data = self.node.clean_step
check_fields = ['cleaning_reboot', 'skip_current_clean_step']
@@ -197,7 +200,13 @@ class RedfishBiosTestCase(db_base.DbTestCase):
bios.supported_apply_times = []
ret = task.driver.bios.apply_configuration(task, data)
mock_get_system.assert_called_with(task.node)
- mock_power_action.assert_called_once_with(task, states.REBOOT)
+ if fast_track:
+ mock_power_action.assert_has_calls([
+ mock.call(task, states.POWER_OFF),
+ mock.call(task, states.REBOOT),
+ ])
+ else:
+ mock_power_action.assert_called_once_with(task, states.REBOOT)
if step == 'factory_reset':
bios.reset_bios.assert_called_once()
if step == 'apply_configuration':
@@ -205,8 +214,8 @@ class RedfishBiosTestCase(db_base.DbTestCase):
attributes, apply_time=None)
mock_build_agent_options.assert_called_once_with(task.node)
mock_prepare.assert_called_once_with(mock.ANY, task, {'a': 'b'})
- info = task.node.driver_internal_info
- self.assertTrue(all(x in info for x in check_fields))
+ for field in check_fields:
+ self.assertIn(field, task.node.driver_internal_info)
self.assertEqual(expected_ret, ret)
def test_factory_reset_step_pre_reboot_cleaning(self):
@@ -221,6 +230,12 @@ class RedfishBiosTestCase(db_base.DbTestCase):
self.node.save()
self._test_step_pre_reboot()
+ def test_factory_reset_step_pre_reboot_fast_track(self):
+ self.node.clean_step = {'priority': 100, 'interface': 'bios',
+ 'step': 'factory_reset', 'argsinfo': {}}
+ self.node.save()
+ self._test_step_pre_reboot(fast_track=True)
+
def test_apply_conf_step_pre_reboot_cleaning(self):
data = [{'name': 'ProcTurboMode', 'value': 'Disabled'},
{'name': 'NicBoot1', 'value': 'NetworkBoot'}]
@@ -239,6 +254,15 @@ class RedfishBiosTestCase(db_base.DbTestCase):
self.node.save()
self._test_step_pre_reboot()
+ def test_apply_conf_step_pre_reboot_fast_track(self):
+ data = [{'name': 'ProcTurboMode', 'value': 'Disabled'},
+ {'name': 'NicBoot1', 'value': 'NetworkBoot'}]
+ self.node.clean_step = {'priority': 100, 'interface': 'bios',
+ 'step': 'apply_configuration',
+ 'argsinfo': {'settings': data}}
+ self.node.save()
+ self._test_step_pre_reboot(fast_track=True)
+
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def _test_step_post_reboot(self, mock_get_system,
attributes_after_reboot=None):
@@ -573,7 +597,8 @@ class RedfishBiosRegistryTestCase(db_base.DbTestCase):
self.registry.registry_entries.attributes[1].read_only = False
self.registry.registry_entries.attributes[1].allowable_values =\
[{'ValueName': 'Enabled', 'ValueDisplayName': 'Enabled'},
- {'ValueName': 'Disabled', 'ValueDisplayName': 'Disabled'}]
+ {'ValueDisplayName': 'Disabled'},
+ {'Invalid': 'banana'}]
self.registry.registry_entries.attributes[2].name = "BootDelay"
self.registry.registry_entries.attributes[2].attribute_type = "Integer"
self.registry.registry_entries.attributes[2].lower_bound = 5
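
The registry fixture above now includes allowable_values entries that lack a 'ValueName' key, or are not meaningful at all, so any code that builds the list of permitted values has to skip such entries rather than fail on a missing key. The snippet below illustrates that defensive pattern with the same test data; it is an illustration, not the driver's actual implementation.

def allowable_value_names(allowable_values):
    return [entry['ValueName'] for entry in allowable_values
            if isinstance(entry, dict) and 'ValueName' in entry]


values = [{'ValueName': 'Enabled', 'ValueDisplayName': 'Enabled'},
          {'ValueDisplayName': 'Disabled'},
          {'Invalid': 'banana'}]
assert allowable_value_names(values) == ['Enabled']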
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
index e2c6e75b2..61bc23e48 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
@@ -262,9 +262,9 @@ class FirmwareUtilsTestCase(base.TestCase):
@mock.patch.object(os, 'chmod', autospec=True)
def test_stage_http(self, mock_chmod, mock_link, mock_copyfile,
mock_makedirs):
- CONF.deploy.http_url = 'http://10.0.0.2'
- CONF.deploy.external_http_url = None
- CONF.deploy.http_root = '/httproot'
+ CONF.set_override('http_url', 'http://10.0.0.2', 'deploy')
+ CONF.set_override('external_http_url', None, 'deploy')
+ CONF.set_override('http_root', '/httproot', 'deploy')
node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
staged_url, need_cleanup = firmware_utils.stage(
@@ -291,9 +291,9 @@ class FirmwareUtilsTestCase(base.TestCase):
@mock.patch.object(os, 'chmod', autospec=True)
def test_stage_http_copyfile(self, mock_chmod, mock_link, mock_copyfile,
mock_makedirs):
- CONF.deploy.http_url = 'http://10.0.0.2'
- CONF.deploy.external_http_url = None
- CONF.deploy.http_root = '/httproot'
+ CONF.set_override('http_url', 'http://10.0.0.2', 'deploy')
+ CONF.set_override('external_http_url', None, 'deploy')
+ CONF.set_override('http_root', '/httproot', 'deploy')
node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
mock_link.side_effect = OSError
@@ -323,9 +323,9 @@ class FirmwareUtilsTestCase(base.TestCase):
@mock.patch.object(os, 'chmod', autospec=True)
def test_stage_http_copyfile_fails(self, mock_chmod, mock_link,
mock_copyfile, mock_makedirs):
- CONF.deploy.http_url = 'http://10.0.0.2'
- CONF.deploy.external_http_url = None
- CONF.deploy.http_root = '/httproot'
+ CONF.set_override('http_url', 'http://10.0.0.2', 'deploy')
+ CONF.set_override('external_http_url', None, 'deploy')
+ CONF.set_override('http_root', '/httproot', 'deploy')
node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
mock_link.side_effect = OSError
mock_copyfile.side_effect = IOError
@@ -352,9 +352,9 @@ class FirmwareUtilsTestCase(base.TestCase):
@mock.patch.object(os, 'chmod', autospec=True)
def test_stage_local_external(self, mock_chmod, mock_link, mock_rmtree,
mock_copyfile, mock_makedirs):
- CONF.deploy.http_url = 'http://10.0.0.2'
- CONF.deploy.external_http_url = 'http://90.0.0.9'
- CONF.deploy.http_root = '/httproot'
+ CONF.set_override('http_url', 'http://10.0.0.2', 'deploy')
+ CONF.set_override('external_http_url', 'http://90.0.0.9', 'deploy')
+ CONF.set_override('http_root', '/httproot', 'deploy')
node = mock.Mock(uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3')
staged_url, need_cleanup = firmware_utils.stage(
@@ -402,7 +402,7 @@ class FirmwareUtilsTestCase(base.TestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_cleanup(self, mock_swift_api, mock_gettempdir, mock_rmtree):
mock_gettempdir.return_value = '/tmp'
- CONF.deploy.http_root = '/httproot'
+ CONF.set_override('http_root', '/httproot', 'deploy')
node = mock.Mock(
uuid='55cdaba0-1123-4622-8b37-bb52dd6285d3',
driver_internal_info={'firmware_cleanup': ['http', 'swift']})
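
This file, like most test modules in this patch, replaces direct attribute writes on CONF with CONF.set_override(). A standalone sketch of why that matters follows, using only oslo.config; the group and option registered here are arbitrary for the example, and the test base class is assumed to reset overrides automatically during cleanup.

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([cfg.StrOpt('http_root', default='/httpboot')],
                   group=cfg.OptGroup('deploy'))
CONF([])  # parse nothing; just initialise the option namespace

# A tracked override: takes effect immediately but can be reverted.
CONF.set_override('http_root', '/httproot', 'deploy')
assert CONF.deploy.http_root == '/httproot'

# Clearing the override restores the registered default; a bare
# "CONF.deploy.http_root = ..." assignment would instead leak into later tests.
CONF.clear_override('http_root', 'deploy')
assert CONF.deploy.http_root == '/httpboot'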
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_management.py b/ironic/tests/unit/drivers/modules/redfish/test_management.py
index 93aae5de8..f8c82949a 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_management.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_management.py
@@ -836,7 +836,7 @@ class RedfishManagementTestCase(db_base.DbTestCase):
mock_update_service = mock.Mock()
mock_update_service.simple_update.return_value = mock_task_monitor
mock_get_update_service.return_value = mock_update_service
- CONF.redfish.firmware_source = 'http'
+ CONF.set_override('firmware_source', 'http', 'redfish')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.save = mock.Mock()
@@ -1346,7 +1346,7 @@ class RedfishManagementTestCase(db_base.DbTestCase):
{'task_monitor': '/task/123', 'url': 'http://test1'},
{'url': 'http://test2'}]}
self.node.driver_internal_info = driver_internal_info
- CONF.redfish.firmware_source = 'http'
+ CONF.set_override('firmware_source', 'http', 'redfish')
management = redfish_mgmt.RedfishManagement()
with task_manager.acquire(self.context, self.node.uuid,
@@ -1375,7 +1375,7 @@ class RedfishManagementTestCase(db_base.DbTestCase):
@mock.patch.object(firmware_utils, 'stage', autospec=True)
def test__stage_firmware_file_https(self, mock_stage, mock_verify_checksum,
mock_download_to_temp):
- CONF.redfish.firmware_source = 'local'
+ CONF.set_override('firmware_source', 'local', 'redfish')
firmware_update = {'url': 'https://test1', 'checksum': 'abc'}
node = mock.Mock()
mock_download_to_temp.return_value = '/tmp/test1'
@@ -1399,7 +1399,7 @@ class RedfishManagementTestCase(db_base.DbTestCase):
def test__stage_firmware_file_swift(
self, mock_get_swift_temp_url, mock_stage, mock_verify_checksum,
mock_download_to_temp):
- CONF.redfish.firmware_source = 'swift'
+ CONF.set_override('firmware_source', 'swift', 'redfish')
firmware_update = {'url': 'swift://container/bios.exe'}
node = mock.Mock()
mock_get_swift_temp_url.return_value = 'http://temp'
@@ -1423,7 +1423,7 @@ class RedfishManagementTestCase(db_base.DbTestCase):
mock_download_to_temp, mock_cleanup):
node = mock.Mock()
firmware_update = {'url': 'https://test1'}
- CONF.redfish.firmware_source = 'local'
+ CONF.set_override('firmware_source', 'local', 'redfish')
firmware_update = {'url': 'https://test1'}
node = mock.Mock()
mock_download_to_temp.return_value = '/tmp/test1'
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
index b2d3a0a0e..dfb3c1473 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -1483,3 +1483,22 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_reboot.assert_not_called()
# Not yet updated as in progress
self.assertEqual({}, task.node.raid_config)
+
+ @mock.patch.object(redfish_raid, 'LOG', autospec=True)
+ def test_update_raid_config_missing_raid_type(
+ self, mock_log, mock_get_system):
+ volumes = [
+ _mock_volume(
+ '1', raid_type=None,
+ capacity_bytes=100 * units.Gi),
+ _mock_volume(
+ '2', raid_type=None,
+ capacity_bytes=500 * units.Gi)]
+ self.mock_storage.volumes.get_members.return_value = volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+
+ redfish_raid.update_raid_config(self.node)
+
+ self.assertEqual([], self.node.raid_config['logical_disks'])
+ mock_log.warning.assert_called_once()
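
The new RAID test asserts that volumes reporting no RAID type end up excluded from logical_disks and that a single warning is logged even when several volumes are skipped. A sketch of that behaviour is below; the volume attributes and dictionary layout are illustrative stand-ins, not ironic's raid_config schema or the real update_raid_config() code.

import logging

LOG = logging.getLogger(__name__)


def logical_disks_from_volumes(volumes):
    disks = []
    skipped = 0
    for volume in volumes:
        if getattr(volume, 'raid_type', None) is None:
            skipped += 1
            continue
        disks.append({'id': volume.identity,
                      'raid_level': str(volume.raid_type),
                      'size_bytes': volume.capacity_bytes})
    if skipped:
        # One warning per refresh, not per volume, matching the test above.
        LOG.warning('Ignored %d volume(s) without a reported RAID type',
                    skipped)
    return disks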
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
index ca8aba9da..01b7089c7 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
@@ -252,6 +252,7 @@ class RedfishUtilsAuthTestCase(db_base.DbTestCase):
redfish_utils.get_system(self.node)
redfish_utils.get_system(self.node)
self.assertEqual(1, mock_sushy.call_count)
+ self.assertEqual(len(redfish_utils.SessionCache._sessions), 1)
@mock.patch.object(sushy, 'Sushy', autospec=True)
def test_ensure_new_session_address(self, mock_sushy):
@@ -270,6 +271,21 @@ class RedfishUtilsAuthTestCase(db_base.DbTestCase):
self.assertEqual(2, mock_sushy.call_count)
@mock.patch.object(sushy, 'Sushy', autospec=True)
+ def test_ensure_new_session_password(self, mock_sushy):
+ d_info = self.node.driver_info
+ d_info['redfish_username'] = 'foo'
+ d_info['redfish_password'] = 'bar'
+ self.node.driver_info = d_info
+ self.node.save()
+ redfish_utils.get_system(self.node)
+ d_info['redfish_password'] = 'foo'
+ self.node.driver_info = d_info
+ self.node.save()
+ redfish_utils.SessionCache._sessions = collections.OrderedDict()
+ redfish_utils.get_system(self.node)
+ self.assertEqual(2, mock_sushy.call_count)
+
+ @mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.SessionCache._sessions',
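
The session-cache assertions above check two things: repeated get_system() calls with unchanged driver_info keep exactly one cached sushy connection, and reconnecting after a credential change (the new test clears the cache explicitly first) creates a second connection. A standalone sketch of such a parameter-keyed cache follows; keying on the full hashed driver_info, including the password, and the MAX_SESSIONS bound are assumptions of the sketch, not the real SessionCache.

import collections
import hashlib


class SessionCacheSketch:
    MAX_SESSIONS = 1000
    _sessions = collections.OrderedDict()

    @classmethod
    def get(cls, driver_info, factory):
        """Return a cached connection for these parameters, creating one."""
        key = hashlib.sha256(
            repr(sorted(driver_info.items())).encode()).hexdigest()
        if key not in cls._sessions:
            cls._sessions[key] = factory(driver_info)
            if len(cls._sessions) > cls.MAX_SESSIONS:
                cls._sessions.popitem(last=False)  # drop the oldest entry
        return cls._sessions[key]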
diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py
index 4b6d271d4..653359f33 100644
--- a/ironic/tests/unit/drivers/modules/test_agent.py
+++ b/ironic/tests/unit/drivers/modules/test_agent.py
@@ -18,7 +18,6 @@ from oslo_config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
-from ironic.common import image_service
from ironic.common import images
from ironic.common import raid
from ironic.common import states
@@ -848,212 +847,6 @@ class TestAgentDeploy(CommonTestsMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.agent.check_image_size',
autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
- autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
- @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
- @mock.patch.object(image_service.HttpImageService, 'validate_href',
- autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'add_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'unconfigure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork, 'validate',
- spec_set=True, autospec=True)
- def test_prepare_with_neutron_net_capabilities_as_string(
- self, validate_net_mock,
- unconfigure_tenant_net_mock, add_provisioning_net_mock,
- validate_href_mock, build_options_mock,
- pxe_prepare_ramdisk_mock, storage_driver_info_mock,
- storage_attach_volumes_mock, check_image_size_mock):
- node = self.node
- node.network_interface = 'neutron'
- instance_info = node.instance_info
- instance_info['capabilities'] = '{"lion": "roar"}'
- node.instance_info = instance_info
- node.save()
- validate_net_mock.side_effect = [
- exception.InvalidParameterValue('invalid'), None]
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- task.node.provision_state = states.DEPLOYING
- build_options_mock.return_value = {'a': 'b'}
- self.driver.prepare(task)
- storage_driver_info_mock.assert_called_once_with(task)
- self.assertEqual(2, validate_net_mock.call_count)
- add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)
- unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- storage_attach_volumes_mock.assert_called_once_with(
- task.driver.storage, task)
- validate_href_mock.assert_called_once_with(mock.ANY, 'fake-image',
- secret=False)
- build_options_mock.assert_called_once_with(task.node)
- pxe_prepare_ramdisk_mock.assert_called_once_with(
- task.driver.boot, task, {'a': 'b'})
- check_image_size_mock.assert_called_once_with(task)
- self.node.refresh()
- capabilities = self.node.instance_info['capabilities']
- self.assertEqual('local', capabilities['boot_option'])
- self.assertEqual('roar', capabilities['lion'])
-
- @mock.patch('ironic.drivers.modules.agent.check_image_size',
- autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
- autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
- @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
- @mock.patch.object(image_service.HttpImageService, 'validate_href',
- autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'add_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'unconfigure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork, 'validate',
- spec_set=True, autospec=True)
- def test_prepare_with_neutron_net_exc_no_capabilities(
- self, validate_net_mock,
- unconfigure_tenant_net_mock, add_provisioning_net_mock,
- validate_href_mock, build_options_mock,
- pxe_prepare_ramdisk_mock, storage_driver_info_mock,
- storage_attach_volumes_mock, check_image_size_mock):
- node = self.node
- node.network_interface = 'neutron'
- node.save()
- validate_net_mock.side_effect = [
- exception.InvalidParameterValue('invalid'), None]
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- task.node.provision_state = states.DEPLOYING
- build_options_mock.return_value = {'a': 'b'}
- self.driver.prepare(task)
- storage_driver_info_mock.assert_called_once_with(task)
- self.assertEqual(2, validate_net_mock.call_count)
- add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)
- unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- storage_attach_volumes_mock.assert_called_once_with(
- task.driver.storage, task)
- validate_href_mock.assert_called_once_with(mock.ANY, 'fake-image',
- secret=False)
- build_options_mock.assert_called_once_with(task.node)
- pxe_prepare_ramdisk_mock.assert_called_once_with(
- task.driver.boot, task, {'a': 'b'})
- check_image_size_mock.assert_called_once_with(task)
- self.node.refresh()
- capabilities = self.node.instance_info['capabilities']
- self.assertEqual('local', capabilities['boot_option'])
-
- @mock.patch('ironic.drivers.modules.agent.check_image_size',
- autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
- autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
- @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
- @mock.patch.object(image_service.HttpImageService, 'validate_href',
- autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'add_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'unconfigure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork, 'validate',
- spec_set=True, autospec=True)
- def test_prepare_with_neutron_net_exc_no_capabilities_overwrite(
- self, validate_net_mock,
- unconfigure_tenant_net_mock, add_provisioning_net_mock,
- validate_href_mock, build_options_mock,
- pxe_prepare_ramdisk_mock, storage_driver_info_mock,
- storage_attach_volumes_mock, check_image_size_mock):
- node = self.node
- node.network_interface = 'neutron'
- instance_info = node.instance_info
- instance_info['capabilities'] = {"cat": "meow"}
- node.instance_info = instance_info
- node.save()
- validate_net_mock.side_effect = [
- exception.InvalidParameterValue('invalid'), None]
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- task.node.provision_state = states.DEPLOYING
- build_options_mock.return_value = {'a': 'b'}
- self.driver.prepare(task)
- storage_driver_info_mock.assert_called_once_with(task)
- self.assertEqual(2, validate_net_mock.call_count)
- add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)
- unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- storage_attach_volumes_mock.assert_called_once_with(
- task.driver.storage, task)
- validate_href_mock.assert_called_once_with(mock.ANY, 'fake-image',
- secret=False)
- build_options_mock.assert_called_once_with(task.node)
- pxe_prepare_ramdisk_mock.assert_called_once_with(
- task.driver.boot, task, {'a': 'b'})
- check_image_size_mock.assert_called_once_with(task)
- self.node.refresh()
- capabilities = self.node.instance_info['capabilities']
- self.assertEqual('local', capabilities['boot_option'])
- self.assertEqual('meow', capabilities['cat'])
-
- @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
- autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
- @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
- autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'add_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork,
- 'unconfigure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(neutron_network.NeutronNetwork, 'validate',
- spec_set=True, autospec=True)
- def test_prepare_with_neutron_net_exc_reraise(
- self, validate_net_mock,
- unconfigure_tenant_net_mock, add_provisioning_net_mock,
- build_instance_info_mock, build_options_mock,
- pxe_prepare_ramdisk_mock, storage_driver_info_mock,
- storage_attach_volumes_mock):
- node = self.node
- node.network_interface = 'neutron'
- instance_info = node.instance_info
- instance_info['capabilities'] = {"boot_option": "netboot"}
- node.instance_info = instance_info
- node.save()
- validate_net_mock.side_effect = (
- exception.InvalidParameterValue('invalid'))
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- task.node.provision_state = states.DEPLOYING
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.deploy.prepare,
- task)
- storage_driver_info_mock.assert_called_once_with(task)
- validate_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertFalse(add_provisioning_net_mock.called)
- self.assertFalse(unconfigure_tenant_net_mock.called)
- self.assertFalse(storage_attach_volumes_mock.called)
- self.assertFalse(build_instance_info_mock.called)
- self.assertFalse(build_options_mock.called)
- self.assertFalse(pxe_prepare_ramdisk_mock.called)
- self.node.refresh()
- capabilities = self.node.instance_info['capabilities']
- self.assertEqual('netboot', capabilities['boot_option'])
-
- @mock.patch('ironic.drivers.modules.agent.check_image_size',
- autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'validate',
@@ -1643,8 +1436,6 @@ class TestAgentDeploy(CommonTestsMixin, db_base.DbTestCase):
def test_prepare_instance_boot_partition_image(self, prepare_instance_mock,
uuid_mock, boot_mode_mock,
log_mock):
- self.node.instance_info = {
- 'capabilities': {'boot_option': 'netboot'}}
uuid_mock.return_value = {
'command_result': {'root uuid': 'root_uuid'}
}
diff --git a/ironic/tests/unit/drivers/modules/test_agent_base.py b/ironic/tests/unit/drivers/modules/test_agent_base.py
index 97daca79f..f8b23e9fb 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_base.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_base.py
@@ -1426,40 +1426,11 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
- def test_prepare_instance_to_boot_netboot(self, configure_mock,
- boot_option_mock,
- prepare_instance_mock,
- failed_state_mock):
- boot_option_mock.return_value = 'netboot'
- prepare_instance_mock.return_value = None
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- root_uuid = 'root_uuid'
- efi_system_part_uuid = 'efi_sys_uuid'
- with task_manager.acquire(self.context, self.node['uuid'],
- shared=False) as task:
- self.deploy.prepare_instance_to_boot(task, root_uuid,
- efi_system_part_uuid)
- self.assertFalse(configure_mock.called)
- boot_option_mock.assert_called_once_with(task.node)
- prepare_instance_mock.assert_called_once_with(task.driver.boot,
- task)
- self.assertFalse(failed_state_mock.called)
-
- @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
- @mock.patch.object(agent_base.AgentDeployMixin,
- 'configure_local_boot', autospec=True)
- def test_prepare_instance_to_boot_localboot(self, configure_mock,
- boot_option_mock,
- prepare_instance_mock,
- failed_state_mock):
- boot_option_mock.return_value = 'local'
+ def test_prepare_instance_to_boot(self, configure_mock,
+ prepare_instance_mock,
+ failed_state_mock):
prepare_instance_mock.return_value = None
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
@@ -1475,20 +1446,16 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid,
prep_boot_part_uuid=None)
- boot_option_mock.assert_called_once_with(task.node)
prepare_instance_mock.assert_called_once_with(task.driver.boot,
task)
self.assertFalse(failed_state_mock.called)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
def test_prepare_instance_to_boot_localboot_prep_partition(
- self, configure_mock, boot_option_mock,
- prepare_instance_mock, failed_state_mock):
- boot_option_mock.return_value = 'local'
+ self, configure_mock, prepare_instance_mock, failed_state_mock):
prepare_instance_mock.return_value = None
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
@@ -1506,21 +1473,17 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid,
prep_boot_part_uuid=prep_boot_part_uuid)
- boot_option_mock.assert_called_once_with(task.node)
prepare_instance_mock.assert_called_once_with(task.driver.boot,
task)
self.assertFalse(failed_state_mock.called)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
def test_prepare_instance_to_boot_configure_fails(self, configure_mock,
- boot_option_mock,
prepare_mock,
failed_state_mock):
- boot_option_mock.return_value = 'local'
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
@@ -1542,7 +1505,6 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid,
prep_boot_part_uuid=None)
- boot_option_mock.assert_called_once_with(task.node)
self.assertFalse(prepare_mock.called)
self.assertFalse(failed_state_mock.called)
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index 2bcdf1cb6..1177e9743 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -54,27 +54,6 @@ kernel deploy_kernel
append initrd=deploy_ramdisk
ipappend 3
-label boot_partition
-kernel kernel
-append initrd=ramdisk root={{ ROOT }}
-
-label boot_whole_disk
-COM32 chain.c32
-append mbr:{{ DISK_IDENTIFIER }}
-"""
-
-_PXECONF_BOOT_PARTITION = """
-default boot_partition
-
-label deploy
-kernel deploy_kernel
-append initrd=deploy_ramdisk
-ipappend 3
-
-label boot_partition
-kernel kernel
-append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef
-
label boot_whole_disk
COM32 chain.c32
append mbr:{{ DISK_IDENTIFIER }}
@@ -88,10 +67,6 @@ kernel deploy_kernel
append initrd=deploy_ramdisk
ipappend 3
-label boot_partition
-kernel kernel
-append initrd=ramdisk root={{ ROOT }}
-
label boot_whole_disk
COM32 chain.c32
append mbr:0x12345678
@@ -109,34 +84,6 @@ kernel deploy_kernel
initrd deploy_ramdisk
boot
-:boot_partition
-kernel kernel
-append initrd=ramdisk root={{ ROOT }}
-boot
-
-:boot_whole_disk
-kernel chain.c32
-append mbr:{{ DISK_IDENTIFIER }}
-boot
-"""
-
-_IPXECONF_BOOT_PARTITION = """
-#!ipxe
-
-dhcp
-
-goto boot_partition
-
-:deploy
-kernel deploy_kernel
-initrd deploy_ramdisk
-boot
-
-:boot_partition
-kernel kernel
-append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef
-boot
-
:boot_whole_disk
kernel chain.c32
append mbr:{{ DISK_IDENTIFIER }}
@@ -155,11 +102,6 @@ kernel deploy_kernel
initrd deploy_ramdisk
boot
-:boot_partition
-kernel kernel
-append initrd=ramdisk root={{ ROOT }}
-boot
-
:boot_whole_disk
kernel chain.c32
append mbr:0x12345678
@@ -178,11 +120,6 @@ kernel deploy_kernel
initrd deploy_ramdisk
boot
-:boot_partition
-kernel kernel
-append initrd=ramdisk root=UUID=0x12345678
-boot
-
:boot_whole_disk
kernel chain.c32
append mbr:{{ DISK_IDENTIFIER }}
@@ -197,29 +134,6 @@ image=deploy_kernel
initrd=deploy_ramdisk
append="ro text"
-image=kernel
- label=boot_partition
- initrd=ramdisk
- append="root={{ ROOT }}"
-
-image=chain.c32
- label=boot_whole_disk
- append="mbr:{{ DISK_IDENTIFIER }}"
-"""
-
-_UEFI_PXECONF_BOOT_PARTITION = """
-default=boot_partition
-
-image=deploy_kernel
- label=deploy
- initrd=deploy_ramdisk
- append="ro text"
-
-image=kernel
- label=boot_partition
- initrd=ramdisk
- append="root=UUID=12345678-1234-1234-1234-1234567890abcdef"
-
image=chain.c32
label=boot_whole_disk
append="mbr:{{ DISK_IDENTIFIER }}"
@@ -233,11 +147,6 @@ image=deploy_kernel
initrd=deploy_ramdisk
append="ro text"
-image=kernel
- label=boot_partition
- initrd=ramdisk
- append="root={{ ROOT }}"
-
image=chain.c32
label=boot_whole_disk
append="mbr:0x12345678"
@@ -253,31 +162,6 @@ menuentry "deploy" {
initrdefi deploy_ramdisk
}
-menuentry "boot_partition" {
- linuxefi kernel "root=(( ROOT ))"
- initrdefi ramdisk
-}
-
-menuentry "boot_whole_disk" {
- linuxefi chain.c32 mbr:(( DISK_IDENTIFIER ))
-}
-"""
-
-_UEFI_PXECONF_BOOT_PARTITION_GRUB = """
-set default=boot_partition
-set timeout=5
-set hidden_timeout_quiet=false
-
-menuentry "deploy" {
- linuxefi deploy_kernel "ro text"
- initrdefi deploy_ramdisk
-}
-
-menuentry "boot_partition" {
- linuxefi kernel "root=UUID=12345678-1234-1234-1234-1234567890abcdef"
- initrdefi ramdisk
-}
-
menuentry "boot_whole_disk" {
linuxefi chain.c32 mbr:(( DISK_IDENTIFIER ))
}
@@ -293,11 +177,6 @@ menuentry "deploy" {
initrdefi deploy_ramdisk
}
-menuentry "boot_partition" {
- linuxefi kernel "root=(( ROOT ))"
- initrdefi ramdisk
-}
-
menuentry "boot_whole_disk" {
linuxefi chain.c32 mbr:0x12345678
}
@@ -322,17 +201,6 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
self.addCleanup(os.unlink, fname)
return fname
- def test_switch_pxe_config_partition_image(self):
- boot_mode = 'bios'
- fname = self._create_config()
- utils.switch_pxe_config(fname,
- '12345678-1234-1234-1234-1234567890abcdef',
- boot_mode,
- False)
- with open(fname, 'r') as f:
- pxeconf = f.read()
- self.assertEqual(_PXECONF_BOOT_PARTITION, pxeconf)
-
def test_switch_pxe_config_whole_disk_image(self):
boot_mode = 'bios'
fname = self._create_config()
@@ -344,18 +212,6 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
pxeconf = f.read()
self.assertEqual(_PXECONF_BOOT_WHOLE_DISK, pxeconf)
- def test_switch_ipxe_config_partition_image(self):
- boot_mode = 'bios'
- fname = self._create_config(ipxe=True)
- utils.switch_pxe_config(fname,
- '12345678-1234-1234-1234-1234567890abcdef',
- boot_mode,
- False,
- ipxe_enabled=True)
- with open(fname, 'r') as f:
- pxeconf = f.read()
- self.assertEqual(_IPXECONF_BOOT_PARTITION, pxeconf)
-
def test_switch_ipxe_config_whole_disk_image(self):
boot_mode = 'bios'
fname = self._create_config(ipxe=True)
@@ -370,19 +226,6 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
# NOTE(TheJulia): Remove elilo support after the deprecation period,
# in the Queens release.
- def test_switch_uefi_elilo_pxe_config_partition_image(self):
- boot_mode = 'uefi'
- fname = self._create_config(boot_mode=boot_mode)
- utils.switch_pxe_config(fname,
- '12345678-1234-1234-1234-1234567890abcdef',
- boot_mode,
- False)
- with open(fname, 'r') as f:
- pxeconf = f.read()
- self.assertEqual(_UEFI_PXECONF_BOOT_PARTITION, pxeconf)
-
- # NOTE(TheJulia): Remove elilo support after the deprecation period,
- # in the Queens release.
def test_switch_uefi_elilo_config_whole_disk_image(self):
boot_mode = 'uefi'
fname = self._create_config(boot_mode=boot_mode)
@@ -394,17 +237,6 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
pxeconf = f.read()
self.assertEqual(_UEFI_PXECONF_BOOT_WHOLE_DISK, pxeconf)
- def test_switch_uefi_grub_pxe_config_partition_image(self):
- boot_mode = 'uefi'
- fname = self._create_config(boot_mode=boot_mode, boot_loader='grub')
- utils.switch_pxe_config(fname,
- '12345678-1234-1234-1234-1234567890abcdef',
- boot_mode,
- False)
- with open(fname, 'r') as f:
- pxeconf = f.read()
- self.assertEqual(_UEFI_PXECONF_BOOT_PARTITION_GRUB, pxeconf)
-
def test_switch_uefi_grub_config_whole_disk_image(self):
boot_mode = 'uefi'
fname = self._create_config(boot_mode=boot_mode, boot_loader='grub')
@@ -416,18 +248,6 @@ class SwitchPxeConfigTestCase(tests_base.TestCase):
pxeconf = f.read()
self.assertEqual(_UEFI_PXECONF_BOOT_WHOLE_DISK_GRUB, pxeconf)
- def test_switch_uefi_ipxe_config_partition_image(self):
- boot_mode = 'uefi'
- fname = self._create_config(boot_mode=boot_mode, ipxe=True)
- utils.switch_pxe_config(fname,
- '12345678-1234-1234-1234-1234567890abcdef',
- boot_mode,
- False,
- ipxe_enabled=True)
- with open(fname, 'r') as f:
- pxeconf = f.read()
- self.assertEqual(_IPXECONF_BOOT_PARTITION, pxeconf)
-
def test_switch_uefi_ipxe_config_whole_disk_image(self):
boot_mode = 'uefi'
fname = self._create_config(boot_mode=boot_mode, ipxe=True)
@@ -738,36 +558,11 @@ class OtherFunctionTestCase(db_base.DbTestCase):
self._test_set_failed_state(collect_logs=False)
self.assertFalse(mock_collect.called)
- def test_get_boot_option(self):
- self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
- result = utils.get_boot_option(self.node)
- self.assertEqual("local", result)
-
def test_get_boot_option_default_value(self):
self.node.instance_info = {}
result = utils.get_boot_option(self.node)
self.assertEqual("local", result)
- def test_get_boot_option_overridden_default_value(self):
- cfg.CONF.set_override('default_boot_option', 'local', 'deploy')
- self.node.instance_info = {}
- result = utils.get_boot_option(self.node)
- self.assertEqual("local", result)
-
- def test_get_boot_option_instance_info_priority(self):
- cfg.CONF.set_override('default_boot_option', 'local', 'deploy')
- self.node.instance_info = {'capabilities':
- '{"boot_option": "netboot"}'}
- result = utils.get_boot_option(self.node)
- self.assertEqual("netboot", result)
-
- @mock.patch.object(utils, 'is_software_raid', autospec=True)
- def test_get_boot_option_software_raid(self, mock_is_software_raid):
- mock_is_software_raid.return_value = True
- cfg.CONF.set_override('default_boot_option', 'netboot', 'deploy')
- result = utils.get_boot_option(self.node)
- self.assertEqual("local", result)
-
@mock.patch.object(utils, 'is_anaconda_deploy', autospec=True)
def test_get_boot_option_anaconda_deploy(self, mock_is_anaconda_deploy):
mock_is_anaconda_deploy.return_value = True
@@ -972,8 +767,6 @@ class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase):
utils.validate_capabilities, self.node)
def test_all_supported_capabilities(self):
- self.assertEqual(('local', 'netboot', 'ramdisk', 'kickstart'),
- utils.SUPPORTED_CAPABILITIES['boot_option'])
self.assertEqual(('bios', 'uefi'),
utils.SUPPORTED_CAPABILITIES['boot_mode'])
self.assertEqual(('true', 'false'),
@@ -1302,38 +1095,6 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
utils.validate_image_properties(self.task, inst_info)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
- @mock.patch.object(image_service, 'get_image_service', autospec=True)
- def test_validate_image_properties_glance_image(self, image_service_mock,
- boot_options_mock):
- inst_info = utils.get_image_instance_info(self.node)
- image_service_mock.return_value.show.return_value = {
- 'properties': {'kernel_id': '1111', 'ramdisk_id': '2222'},
- }
-
- utils.validate_image_properties(self.task, inst_info)
- image_service_mock.assert_called_once_with(
- self.node.instance_info['image_source'], context=self.context
- )
-
- @mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
- @mock.patch.object(image_service, 'get_image_service', autospec=True)
- def test_validate_image_properties_glance_image_missing_prop(
- self, image_service_mock, boot_options_mock):
- inst_info = utils.get_image_instance_info(self.node)
- image_service_mock.return_value.show.return_value = {
- 'properties': {'kernel_id': '1111'},
- }
-
- self.assertRaises(exception.MissingParameterValue,
- utils.validate_image_properties,
- self.task, inst_info)
- image_service_mock.assert_called_once_with(
- self.node.instance_info['image_source'], context=self.context
- )
-
- @mock.patch.object(utils, 'get_boot_option', autospec=True,
return_value='kickstart')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_missing_stage2_id(
@@ -1351,7 +1112,7 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
+ return_value='kickstart')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_not_authorized(
self, image_service_mock, boot_options_mock):
@@ -1363,7 +1124,7 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
inst_info)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
+ return_value='kickstart')
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_validate_image_properties_glance_image_not_found(
self, image_service_mock, boot_options_mock):
@@ -1381,7 +1142,7 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
inst_info)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
+ return_value='kickstart')
def test_validate_image_properties_nonglance_image(
self, boot_options_mock):
instance_info = {
@@ -1473,8 +1234,8 @@ class ValidateParametersTestCase(db_base.DbTestCase):
self.assertNotIn('ramdisk', info)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
- def test__get_img_instance_info_good_non_glance_image_netboot(
+ return_value='kickstart')
+ def test__get_img_instance_info_good_non_glance_image_anaconda(
self, mock_boot_opt):
instance_info = INST_INFO_DICT.copy()
instance_info['image_source'] = 'http://image'
@@ -1488,7 +1249,7 @@ class ValidateParametersTestCase(db_base.DbTestCase):
self.assertIsNotNone(info['kernel'])
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
+ return_value='kickstart')
def test__get_img_instance_info_non_glance_image_missing_kernel(
self, mock_boot_opt):
instance_info = INST_INFO_DICT.copy()
@@ -1501,7 +1262,7 @@ class ValidateParametersTestCase(db_base.DbTestCase):
instance_info=instance_info)
@mock.patch.object(utils, 'get_boot_option', autospec=True,
- return_value='netboot')
+ return_value='kickstart')
def test__get_img_instance_info_non_glance_image_missing_ramdisk(
self, mock_boot_opt):
instance_info = INST_INFO_DICT.copy()
@@ -1768,23 +1529,26 @@ class InstanceInfoTestCase(db_base.DbTestCase):
)
utils.parse_instance_info(node)
- def test_parse_instance_info_nonglance_image_netboot(self):
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='kickstart')
+ def test_parse_instance_info_nonglance_image_anaconda(self, mock_boot_opt):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
info['kernel'] = 'file:///image.vmlinuz'
info['ramdisk'] = 'file:///image.initrd'
- info['capabilities'] = {'boot_option': 'netboot'}
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
utils.parse_instance_info(node)
- def test_parse_instance_info_nonglance_image_no_kernel(self):
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='kickstart')
+ def test_parse_instance_info_nonglance_image_no_kernel(self,
+ mock_boot_opt):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
info['ramdisk'] = 'file:///image.initrd'
- info['capabilities'] = {'boot_option': 'netboot'}
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
@@ -1947,12 +1711,15 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
self.assertEqual(expected_i_info, info)
parse_instance_info_mock.assert_called_once_with(task.node)
+ @mock.patch.object(utils, 'get_boot_option', autospec=True,
+ return_value='kickstart')
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
@mock.patch.object(utils, 'parse_instance_info', autospec=True)
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
- def test_build_instance_info_for_deploy_glance_partition_image_netboot(
- self, glance_mock, parse_instance_info_mock, validate_mock):
+ def test_build_instance_info_for_deploy_glance_partition_image_anaconda(
+ self, glance_mock, parse_instance_info_mock, validate_mock,
+ boot_opt_mock):
i_info = {}
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
i_info['kernel'] = '13ce5a56-1de3-4916-b8b2-be778645d003'
@@ -1962,7 +1729,6 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
i_info['ephemeral_gb'] = 0
i_info['ephemeral_format'] = None
i_info['configdrive'] = 'configdrive'
- i_info['capabilities'] = {'boot_option': 'netboot'}
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
@@ -1980,8 +1746,7 @@ class TestBuildInstanceInfoForDeploy(db_base.DbTestCase):
glance_obj_mock.swift_temp_url.return_value = 'http://temp-url'
parse_instance_info_mock.return_value = {'swap_mb': 4}
image_source = '733d1c44-a2ea-414b-aca7-69decf20d810'
- expected_i_info = {'capabilities': {'boot_option': 'netboot'},
- 'root_gb': 5,
+ expected_i_info = {'root_gb': 5,
'swap_mb': 4,
'ephemeral_gb': 0,
'ephemeral_format': None,
diff --git a/ironic/tests/unit/drivers/modules/test_image_utils.py b/ironic/tests/unit/drivers/modules/test_image_utils.py
index 6d79629d9..753452f5d 100644
--- a/ironic/tests/unit/drivers/modules/test_image_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_image_utils.py
@@ -151,6 +151,32 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
+ def test_publish_image_external_ip_node_override(
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
+ self.config(use_swift=False, group='redfish')
+ self.config(http_url='http://localhost',
+ external_http_url='http://non-local.host',
+ group='deploy')
+ img_handler_obj = image_utils.ImageHandler(self.node.driver)
+ self.node.driver_info["external_http_url"] = "http://node.override.url"
+
+ override_url = self.node.driver_info.get("external_http_url")
+
+ url = img_handler_obj.publish_image('file.iso', 'boot.iso',
+ override_url)
+
+ self.assertEqual(
+ 'http://node.override.url/redfish/boot.iso', url)
+
+ mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
+ mock_link.assert_called_once_with(
+ 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_chmod.assert_called_once_with('file.iso', 0o644)
+
+ @mock.patch.object(os, 'chmod', autospec=True)
+ @mock.patch.object(image_utils, 'shutil', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_local_copy(self, mock_mkdir, mock_link,
mock_shutil, mock_chmod):
self.config(use_swift=False, group='redfish')
@@ -271,8 +297,8 @@ class RedfishImageUtilsTestCase(db_base.DbTestCase):
object_name = 'image-%s' % task.node.uuid
- mock_publish_image.assert_called_once_with(mock.ANY,
- mock.ANY, object_name)
+ mock_publish_image.assert_called_once_with(mock.ANY, mock.ANY,
+ object_name, None)
mock_create_vfat_image.assert_called_once_with(
mock.ANY, parameters=None)
@@ -295,8 +321,63 @@ class RedfishImageUtilsTestCase(db_base.DbTestCase):
object_name = 'image-%s' % task.node.uuid
- mock_publish_image.assert_called_once_with(mock.ANY,
- mock.ANY, object_name)
+ mock_publish_image.assert_called_once_with(mock.ANY, mock.ANY,
+ object_name, None)
+
+ mock_create_vfat_image.assert_called_once_with(
+ mock.ANY, parameters={"ipa-api-url": "http://callback"})
+
+ self.assertEqual(expected_url, url)
+
+ @mock.patch.object(image_utils.ImageHandler, 'publish_image',
+ autospec=True)
+ @mock.patch.object(images, 'create_vfat_image', autospec=True)
+ def test_prepare_floppy_image_publish_with_config_external_http_url(
+ self, mock_create_vfat_image, mock_publish_image):
+ self.config(external_callback_url='http://callback/',
+ external_http_url='http://config.external.url',
+ group='deploy')
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ expected_url = 'http://config.external.url/c.f?e=f'
+
+ mock_publish_image.return_value = expected_url
+
+ url = image_utils.prepare_floppy_image(task)
+
+ object_name = 'image-%s' % task.node.uuid
+
+ mock_publish_image.assert_called_once_with(mock.ANY, mock.ANY,
+ object_name, None)
+
+ mock_create_vfat_image.assert_called_once_with(
+ mock.ANY, parameters={"ipa-api-url": "http://callback"})
+
+ self.assertEqual(expected_url, url)
+
+ @mock.patch.object(image_utils.ImageHandler, 'publish_image',
+ autospec=True)
+ @mock.patch.object(images, 'create_vfat_image', autospec=True)
+ def test_prepare_floppy_image_publish_with_node_external_http_url(
+ self, mock_create_vfat_image, mock_publish_image):
+ self.config(external_callback_url='http://callback/',
+ external_http_url='http://config.external.url',
+ group='deploy')
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.driver_info["external_http_url"] = \
+ "https://node.external"
+ override_url = task.node.driver_info.get("external_http_url")
+ expected_url = '"https://node.external/c.f?e=f'
+
+ mock_publish_image.return_value = expected_url
+
+ url = image_utils.prepare_floppy_image(task)
+
+ object_name = 'image-%s' % task.node.uuid
+
+ mock_publish_image.assert_called_once_with(
+ mock.ANY, mock.ANY, object_name, override_url)
mock_create_vfat_image.assert_called_once_with(
mock.ANY, parameters={"ipa-api-url": "http://callback"})
diff --git a/ironic/tests/unit/drivers/modules/test_ipxe.py b/ironic/tests/unit/drivers/modules/test_ipxe.py
index d9dd126b3..ef37e3fc4 100644
--- a/ironic/tests/unit/drivers/modules/test_ipxe.py
+++ b/ironic/tests/unit/drivers/modules/test_ipxe.py
@@ -169,16 +169,6 @@ class iPXEBootTestCase(db_base.DbTestCase):
task.driver.boot.validate(task)
mock_boot_option.assert_called_with(task.node)
- @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
- return_value='netboot', autospec=True)
- def test_validate_fail_missing_image_source(self, mock_boot_option):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- del task.node['instance_info']['image_source']
- self.assertRaises(exception.MissingParameterValue,
- task.driver.boot.validate, task)
- mock_boot_option.assert_called_with(task.node)
-
def test_validate_fail_no_port(self):
new_node = obj_utils.create_test_node(
self.context,
@@ -190,48 +180,6 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
- @mock.patch.object(image_service.GlanceImageService, 'show',
- autospec=True)
- def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
- instance_info = {"boot_option": "netboot"}
- mock_glance.return_value = {'properties': {}}
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node.instance_info['capabilities'] = instance_info
- self.assertRaises(exception.MissingParameterValue,
- task.driver.boot.validate,
- task)
-
- @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
- return_value='netboot', autospec=True)
- @mock.patch.object(image_service.GlanceImageService, 'show',
- autospec=True)
- def test_validate_fail_glance_image_doesnt_exists(self, mock_glance,
- mock_boot_option):
- mock_glance.side_effect = exception.ImageNotFound('not found')
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.boot.validate, task)
- mock_boot_option.assert_called_with(task.node)
-
- @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
- return_value='netboot', autospec=True)
- @mock.patch.object(image_service.GlanceImageService, 'show',
- autospec=True)
- def test_validate_fail_glance_conn_problem(self, mock_glance,
- mock_boot_option):
- exceptions = (exception.GlanceConnectionFailed('connection fail'),
- exception.ImageNotAuthorized('not authorized'),
- exception.Invalid('invalid'))
- mock_glance.side_effect = exceptions
- for exc in exceptions:
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.boot.validate, task)
- mock_boot_option.assert_called_with(task.node)
-
def test_validate_inspection(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.validate_inspection(task)
@@ -563,94 +511,6 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.node.save()
self._test_clean_up_ramdisk(mode='rescue')
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- instance_info = {"boot_option": "netboot"}
- get_image_info_mock.return_value = image_info
- with task_manager.acquire(self.context, self.node.uuid) as task:
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid, ipxe_enabled=True)
- task.node.properties['capabilities'] = 'boot_mode:uefi'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, image_info,
- ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'uefi', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=True, anaconda_boot=False)
- set_boot_device_mock.assert_called_once_with(task,
- boot_devices.PXE,
- persistent=True)
-
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_bios(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- instance_info = {"boot_option": "netboot",
- "boot_mode": "bios"}
- get_image_info_mock.return_value = image_info
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
-
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid, ipxe_enabled=True)
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, image_info,
- ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'bios', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=True, anaconda_boot=False)
- set_boot_device_mock.assert_called_once_with(task,
- boot_devices.PXE,
- persistent=True)
-
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@@ -665,8 +525,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
- i_info_caps = {"boot_option": "ramdisk",
- "boot_mode": "bios"}
+ i_info_caps = {"boot_mode": "bios"}
kernel_arg = "meow"
get_image_info_mock.return_value = image_info
@@ -676,6 +535,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
i_info['capabilities'] = i_info_caps
i_info['kernel_append_params'] = kernel_arg
task.node.instance_info = i_info
+ task.node.deploy_interface = 'ramdisk'
task.node.save()
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
@@ -721,15 +581,14 @@ class iPXEBootTestCase(db_base.DbTestCase):
dhcp_factory_mock.return_value = provider_mock
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
- i_info_caps = {"boot_option": "ramdisk"}
kernel_arg = "meow"
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
i_info = task.node.instance_info
- i_info['capabilities'] = i_info_caps
i_info['kernel_append_params'] = kernel_arg
task.node.instance_info = i_info
+ task.node.deploy_interface = 'ramdisk'
task.node.save()
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True)
@@ -761,160 +620,6 @@ class iPXEBootTestCase(db_base.DbTestCase):
mock_create_pxe_config.assert_called_once_with(
task, expected_params, mock.ANY, ipxe_enabled=True)
- @mock.patch('os.path.isfile', return_value=False, autospec=True)
- @mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_active(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock, create_pxe_config_mock, isfile_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- instance_info = {"boot_option": "netboot"}
- get_image_info_mock.return_value = image_info
- self.node.provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
-
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid, ipxe_enabled=True)
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, image_info,
- ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.ipxe_config_template,
- ipxe_enabled=True)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'bios', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=True, anaconda_boot=False)
- self.assertFalse(set_boot_device_mock.called)
-
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_missing_root_uuid(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- get_image_info_mock.return_value = image_info
- instance_info = {"boot_option": "netboot"}
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['is_whole_disk_image'] = False
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=4)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, image_info,
- ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- self.assertFalse(switch_pxe_config_mock.called)
- self.assertFalse(set_boot_device_mock.called)
-
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_missing_root_uuid_default(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- get_image_info_mock.return_value = image_info
- instance_info = self.node.instance_info
- instance_info['capabilities'] = {"boot_option": "netboot"}
- self.node.instance_info = instance_info
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.driver_internal_info['is_whole_disk_image'] = False
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=4)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, image_info,
- ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- self.assertFalse(switch_pxe_config_mock.called)
- self.assertFalse(set_boot_device_mock.called)
-
- # NOTE(TheJulia): The log mock below is attached to the iPXE interface
- # which directly logs the warning that is being checked for.
- @mock.patch.object(pxe_base.LOG, 'warning', autospec=True)
- @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_whole_disk_image_missing_root_uuid(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, set_boot_device_mock,
- clean_up_pxe_mock, log_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- get_image_info_mock.return_value = {}
- instance_info = {"boot_option": "netboot",
- "boot_mode": "bios"}
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['is_whole_disk_image'] = True
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=True, ip_version=6)
-
- task.driver.boot.prepare_instance(task)
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=True)
- cache_mock.assert_called_once_with(task, {}, ipxe_enabled=True)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- self.assertTrue(log_mock.called)
- clean_up_pxe_mock.assert_called_once_with(task, ipxe_enabled=True)
- set_boot_device_mock.assert_called_once_with(
- task, boot_devices.DISK, persistent=True)
-
@mock.patch('os.path.isfile', lambda filename: False)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(deploy_utils, 'is_iscsi_boot', lambda task: True)
@@ -1037,8 +742,8 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.config(http_url=http_url, group='deploy')
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
- self.node.instance_info = {'boot_iso': 'http://1.2.3.4:1234/boot.iso',
- 'capabilities': {'boot_option': 'ramdisk'}}
+ self.node.deploy_interface = 'ramdisk'
+ self.node.instance_info = {'boot_iso': 'http://1.2.3.4:1234/boot.iso'}
image_info = {'kernel': ('', '/path/to/kernel'),
'deploy_kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk'),
@@ -1075,7 +780,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_ramdisk_with_kernel_arg(
+ def test_prepare_instance_ramdisk_with_kernel_arg(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock, create_pxe_config_mock):
@@ -1130,14 +835,9 @@ class iPXEBootTestCase(db_base.DbTestCase):
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
- set_boot_device_mock,
- secure_boot_mock):
+ def test_prepare_instance(self, clean_up_pxe_config_mock,
+ set_boot_device_mock, secure_boot_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = {'boot_option': 'local'}
- task.node.instance_info = instance_info
- task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=True)
@@ -1148,15 +848,11 @@ class iPXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock,
- set_boot_device_mock):
+ def test_prepare_instance_active(self, clean_up_pxe_config_mock,
+ set_boot_device_mock):
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = {'boot_option': 'local'}
- task.node.instance_info = instance_info
- task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=True)
@@ -1168,14 +864,12 @@ class iPXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_localboot_with_fallback(
+ def test_prepare_instance_with_fallback(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
clean_up_pxe_config_mock, set_boot_device_mock):
self.config(enable_netboot_fallback=True, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.instance_info = task.node.instance_info
- task.node.instance_info['capabilities'] = {'boot_option': 'local'}
task.node.driver_internal_info['root_uuid_or_disk_id'] = (
"30212642-09d3-467f-8e09-21685826ab50")
task.node.driver_internal_info['is_whole_disk_image'] = False
diff --git a/ironic/tests/unit/drivers/modules/test_pxe.py b/ironic/tests/unit/drivers/modules/test_pxe.py
index 779606229..e7d444104 100644
--- a/ironic/tests/unit/drivers/modules/test_pxe.py
+++ b/ironic/tests/unit/drivers/modules/test_pxe.py
@@ -77,7 +77,8 @@ class PXEBootTestCase(db_base.DbTestCase):
self.config(enabled_boot_interfaces=[self.boot_interface,
'ipxe', 'fake'])
- self.config(enabled_deploy_interfaces=['fake', 'direct', 'anaconda'])
+ self.config(enabled_deploy_interfaces=['fake', 'direct', 'anaconda',
+ 'ramdisk'])
self.node = obj_utils.create_test_node(
self.context,
driver=self.driver,
@@ -144,15 +145,6 @@ class PXEBootTestCase(db_base.DbTestCase):
del task.node['instance_info']['image_source']
task.driver.boot.validate(task)
- def test_validate_fail_missing_image_source(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node['instance_info']['capabilities'] = {
- 'boot_option': 'netboot'}
- del task.node['instance_info']['image_source']
- self.assertRaises(exception.MissingParameterValue,
- task.driver.boot.validate, task)
-
def test_validate_fail_no_port(self):
new_node = obj_utils.create_test_node(
self.context,
@@ -164,40 +156,18 @@ class PXEBootTestCase(db_base.DbTestCase):
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
- @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
- def test_validate_fail_no_image_kernel_ramdisk_props(self, mock_glance):
- instance_info = {"boot_option": "netboot"}
- mock_glance.return_value = {'properties': {}}
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node.instance_info['capabilities'] = instance_info
- self.assertRaises(exception.MissingParameterValue,
- task.driver.boot.validate,
- task)
-
- @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
- def test_validate_fail_glance_image_doesnt_exists(self, mock_glance):
- mock_glance.side_effect = exception.ImageNotFound('not found')
+ @mock.patch.object(deploy_utils, 'get_boot_option',
+ return_value='ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'validate_image_properties',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'get_image_instance_info', autospec=True)
+ def test_validate_non_local(self, mock_get_iinfo, mock_validate,
+ mock_boot_opt):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.node.instance_info['capabilities'] = {
- 'boot_option': 'netboot'}
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.boot.validate, task)
-
- @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
- def test_validate_fail_glance_conn_problem(self, mock_glance):
- exceptions = (exception.GlanceConnectionFailed('connection fail'),
- exception.ImageNotAuthorized('not authorized'),
- exception.Invalid('invalid'))
- mock_glance.side_effect = exceptions
- for exc in exceptions:
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- task.node.instance_info['capabilities'] = {
- 'boot_option': 'netboot'}
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.boot.validate, task)
+ task.driver.boot.validate(task)
+ mock_validate.assert_called_once_with(
+ task, mock_get_iinfo.return_value)
def test_validate_inspection(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -486,217 +456,13 @@ class PXEBootTestCase(db_base.DbTestCase):
self.node.save()
self._test_clean_up_ramdisk(mode='rescue')
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_bios(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- get_image_info_mock.return_value = image_info
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
- task.node.instance_info = {
- 'capabilities': {'boot_option': 'netboot',
- 'boot_mode': 'bios'}}
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=4)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid)
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=False)
- cache_mock.assert_called_once_with(
- task, image_info, ipxe_enabled=False)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'bios', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=False, anaconda_boot=False)
- set_boot_device_mock.assert_called_once_with(task,
- boot_devices.PXE,
- persistent=True)
-
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_uefi(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- get_image_info_mock.return_value = image_info
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
- task.node.instance_info = {
- 'capabilities': {'boot_option': 'netboot'}}
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=4)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid)
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=False)
- cache_mock.assert_called_once_with(
- task, image_info, ipxe_enabled=False)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'uefi', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=False, anaconda_boot=False)
- set_boot_device_mock.assert_called_once_with(task,
- boot_devices.PXE,
- persistent=True)
-
- @mock.patch('os.path.isfile', return_value=False, autospec=True)
- @mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_active(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock, create_pxe_config_mock, isfile_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- instance_info = {"boot_option": "netboot"}
- get_image_info_mock.return_value = image_info
- self.node.provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.driver_internal_info['root_uuid_or_disk_id'] = (
- "30212642-09d3-467f-8e09-21685826ab50")
- task.node.driver_internal_info['is_whole_disk_image'] = False
- task.node.instance_info['capabilities'] = instance_info
- task.driver.boot.prepare_instance(task)
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=6)
- pxe_config_path = pxe_utils.get_pxe_config_file_path(
- task.node.uuid)
-
- get_image_info_mock.assert_called_once_with(
- task, ipxe_enabled=False)
- cache_mock.assert_called_once_with(
- task, image_info, ipxe_enabled=False)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.pxe_config_template,
- ipxe_enabled=False)
- switch_pxe_config_mock.assert_called_once_with(
- pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
- 'bios', False, iscsi_boot=False, ramdisk_boot=False,
- ipxe_enabled=False, anaconda_boot=False)
- self.assertFalse(set_boot_device_mock.called)
-
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_netboot_missing_root_uuid(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, switch_pxe_config_mock,
- set_boot_device_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- image_info = {'kernel': ('', '/path/to/kernel'),
- 'ramdisk': ('', '/path/to/ramdisk')}
- instance_info = {"boot_option": "netboot"}
- get_image_info_mock.return_value = image_info
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['is_whole_disk_image'] = False
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=6)
-
- task.driver.boot.prepare_instance(task)
-
- get_image_info_mock.assert_called_once_with(task,
- ipxe_enabled=False)
- cache_mock.assert_called_once_with(
- task, image_info, ipxe_enabled=False)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- self.assertFalse(switch_pxe_config_mock.called)
- self.assertFalse(set_boot_device_mock.called)
-
- @mock.patch.object(pxe_base.LOG, 'warning', autospec=True)
- @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
- @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
- @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
- def test_prepare_instance_whole_disk_image_missing_root_uuid(
- self, get_image_info_mock, cache_mock,
- dhcp_factory_mock, set_boot_device_mock,
- clean_up_pxe_mock, log_mock):
- provider_mock = mock.MagicMock()
- dhcp_factory_mock.return_value = provider_mock
- get_image_info_mock.return_value = {}
- instance_info = {"boot_option": "netboot"}
- with task_manager.acquire(self.context, self.node.uuid) as task:
- task.node.properties['capabilities'] = 'boot_mode:bios'
- task.node.instance_info['capabilities'] = instance_info
- task.node.driver_internal_info['is_whole_disk_image'] = True
- dhcp_opts = pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False)
- dhcp_opts += pxe_utils.dhcp_options_for_instance(
- task, ipxe_enabled=False, ip_version=6)
- task.driver.boot.prepare_instance(task)
- get_image_info_mock.assert_called_once_with(task,
- ipxe_enabled=False)
- cache_mock.assert_called_once_with(
- task, {}, ipxe_enabled=False)
- provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
- self.assertTrue(log_mock.called)
- clean_up_pxe_mock.assert_called_once_with(
- task, ipxe_enabled=False)
- set_boot_device_mock.assert_called_once_with(
- task, boot_devices.DISK, persistent=True)
-
@mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
- set_boot_device_mock,
- secure_boot_mock):
+ def test_prepare_instance(self, clean_up_pxe_config_mock,
+ set_boot_device_mock, secure_boot_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = {'boot_option': 'local'}
- task.node.instance_info = instance_info
- task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=False)
@@ -707,15 +473,11 @@ class PXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
- def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock,
- set_boot_device_mock):
+ def test_prepare_instance_active(self, clean_up_pxe_config_mock,
+ set_boot_device_mock):
self.node.provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = {'boot_option': 'local'}
- task.node.instance_info = instance_info
- task.node.save()
task.driver.boot.prepare_instance(task)
clean_up_pxe_config_mock.assert_called_once_with(
task, ipxe_enabled=False)
@@ -741,9 +503,7 @@ class PXEBootTestCase(db_base.DbTestCase):
self.node.provision_state = states.DEPLOYING
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
- instance_info = task.node.instance_info
- instance_info['capabilities'] = {'boot_option': 'ramdisk'}
- task.node.instance_info = instance_info
+ task.node.deploy_interface = 'ramdisk'
task.node.save()
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=False)
diff --git a/ironic/tests/unit/drivers/modules/test_ramdisk.py b/ironic/tests/unit/drivers/modules/test_ramdisk.py
index 4deedbfc2..66d11aa18 100644
--- a/ironic/tests/unit/drivers/modules/test_ramdisk.py
+++ b/ironic/tests/unit/drivers/modules/test_ramdisk.py
@@ -47,8 +47,6 @@ class RamdiskDeployTestCase(db_base.DbTestCase):
self.config(tftp_root=self.temp_dir, group='pxe')
self.temp_dir = tempfile.mkdtemp()
self.config(images_path=self.temp_dir, group='pxe')
- self.config(enabled_deploy_interfaces=['ramdisk'])
- self.config(enabled_boot_interfaces=['pxe'])
for iface in drivers_base.ALL_INTERFACES:
impl = 'fake'
if iface == 'network':
@@ -94,7 +92,6 @@ class RamdiskDeployTestCase(db_base.DbTestCase):
task, ipxe_enabled=False, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid)
- task.node.properties['capabilities'] = 'boot_option:netboot'
task.node.driver_internal_info['is_whole_disk_image'] = False
task.driver.deploy.prepare(task)
task.driver.deploy.deploy(task)
@@ -122,20 +119,26 @@ class RamdiskDeployTestCase(db_base.DbTestCase):
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
mock_image_info.return_value = image_info
- i_info = self.node.instance_info
- i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
- self.node.instance_info = i_info
- self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertIsNone(task.driver.deploy.deploy(task))
mock_image_info.assert_called_once_with(task, ipxe_enabled=False)
mock_cache.assert_called_once_with(
task, image_info, ipxe_enabled=False)
self.assertFalse(mock_warning.called)
- i_info['configdrive'] = 'meow'
- self.node.instance_info = i_info
+
+ @mock.patch.object(ramdisk.LOG, 'warning', autospec=True)
+ @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
+ @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
+ @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
+ def test_deploy_with_configdrive(self, mock_image_info, mock_cache,
+ mock_dhcp_factory, mock_switch_config,
+ mock_warning):
+ image_info = {'kernel': ('', '/path/to/kernel'),
+ 'ramdisk': ('', '/path/to/ramdisk')}
+ mock_image_info.return_value = image_info
+ self.node.set_instance_info('configdrive', 'meow')
self.node.save()
- mock_warning.reset_mock()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertIsNone(task.driver.deploy.deploy(task))
self.assertTrue(mock_warning.called)
diff --git a/ironic/tests/unit/drivers/modules/test_snmp.py b/ironic/tests/unit/drivers/modules/test_snmp.py
index 36a59a396..00799dc4d 100644
--- a/ironic/tests/unit/drivers/modules/test_snmp.py
+++ b/ironic/tests/unit/drivers/modules/test_snmp.py
@@ -327,6 +327,34 @@ class SNMPValidateParametersTestCase(db_base.DbTestCase):
info = snmp._parse_driver_info(node)
self.assertEqual('teltronix', info['driver'])
+ def test__parse_driver_info_servertech_sentry3(self):
+ # Make sure the servertech_sentry3 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='servertech_sentry3')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('servertech_sentry3', info['driver'])
+
+ def test__parse_driver_info_servertech_sentry4(self):
+ # Make sure the servertech_sentry4 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='servertech_sentry4')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('servertech_sentry4', info['driver'])
+
+ def test__parse_driver_info_raritan_pdu2(self):
+ # Make sure the raritan_pdu2 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='raritan_pdu2')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('raritan_pdu2', info['driver'])
+
+ def test__parse_driver_info_vertivgeist_pdu(self):
+ # Make sure the vertivgeist_pdu driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='vertivgeist_pdu')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('vertivgeist_pdu', info['driver'])
+
def test__parse_driver_info_snmp_v1(self):
# Make sure SNMPv1 is parsed with a community string.
info = db_utils.get_test_snmp_info(snmp_version='1',
@@ -875,7 +903,7 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
def test_power_off_timeout(self, mock_sleep, mock_get_client):
# Ensure that a power off consistency poll timeout causes an error
mock_client = mock_get_client.return_value
- CONF.snmp.power_timeout = 5
+ CONF.set_override('power_timeout', 5, 'snmp')
driver = snmp._get_driver(self.node)
mock_client.get.return_value = driver.value_power_on
pstate = driver.power_off()
@@ -1260,6 +1288,58 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
def test_apc_rackpdu_power_reset(self, mock_get_client):
self._test_simple_device_power_reset('apc_rackpdu', mock_get_client)
+ def test_raritan_pdu2_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # Raritan PDU2 driver
+ self._update_driver_info(snmp_driver="raritan_pdu2",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 13742, 6, 4, 1, 2, 1, 2, 1, 6)
+ action = (2,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(0, driver.value_power_off)
+
+ def test_servertech_sentry3_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # ServerTech Sentry3 driver
+ self._update_driver_info(snmp_driver="servertech_sentry3",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 1718, 3, 2, 3, 1, 5, 1, 1, 6)
+ action = (5,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(2, driver.value_power_off)
+
+ def test_servertech_sentry4_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # ServerTech Sentry4 driver
+ self._update_driver_info(snmp_driver="servertech_sentry4",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 1718, 4, 1, 8, 5, 1, 2, 1, 1, 6)
+ action = (2,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(2, driver.value_power_off)
+
+ def test_vertivgeist_pdu_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # Vertiv Geist PDU driver
+ self._update_driver_info(snmp_driver="vertivgeist_pdu",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 21239, 5, 2, 3, 5, 1, 4, 6)
+ action = (4,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(2, driver.value_power_on)
+ self.assertEqual(4, driver.value_power_off)
+
def test_aten_snmp_objects(self, mock_get_client):
# Ensure the correct SNMP object OIDs and values are used by the
# Aten driver
diff --git a/ironic/tests/unit/drivers/pxe_config.template b/ironic/tests/unit/drivers/pxe_config.template
index 7cf91e369..238dde4ce 100644
--- a/ironic/tests/unit/drivers/pxe_config.template
+++ b/ironic/tests/unit/drivers/pxe_config.template
@@ -5,12 +5,6 @@ kernel /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_kernel
append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk selinux=0 troubleshoot=0 text test_param
ipappend 2
-
-label boot_partition
-kernel /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/kernel
-append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/ramdisk root={{ ROOT }} ro text test_param
-
-
label boot_whole_disk
COM32 chain.c32
append mbr:{{ DISK_IDENTIFIER }}
diff --git a/ironic/tests/unit/drivers/pxe_grub_config.template b/ironic/tests/unit/drivers/pxe_grub_config.template
index 568018671..c4410b489 100644
--- a/ironic/tests/unit/drivers/pxe_grub_config.template
+++ b/ironic/tests/unit/drivers/pxe_grub_config.template
@@ -7,11 +7,6 @@ menuentry "deploy" {
initrdefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk
}
-menuentry "boot_partition" {
- linuxefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/kernel root=(( ROOT )) ro text test_param boot_server=192.0.2.1
- initrdefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/ramdisk
-}
-
menuentry "boot_ramdisk" {
linuxefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/kernel root=/dev/ram0 text test_param ramdisk_param
initrdefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/ramdisk
diff --git a/releasenotes/notes/ValueDisplayName-13837c653277ff08.yaml b/releasenotes/notes/ValueDisplayName-13837c653277ff08.yaml
new file mode 100644
index 000000000..6abac74c6
--- /dev/null
+++ b/releasenotes/notes/ValueDisplayName-13837c653277ff08.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes detection of allowable values for a BIOS settings enumeration in
+ the ``redfish`` BIOS interface when only ``ValueDisplayName`` is provided.
diff --git a/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml b/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml
new file mode 100644
index 000000000..f98f2e607
--- /dev/null
+++ b/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds ``raritan_pdu2``, ``servertech_sentry3``, ``servertech_sentry4``,
+ and ``vertivgeist_pdu`` SNMP drivers to support additional PDU models.
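
A hedged sketch of selecting one of the new drivers on a node with the baremetal CLI; ``snmp_address`` and ``snmp_outlet`` are the usual companion ``driver_info`` fields, shown here as assumptions to be checked against the SNMP driver documentation:

    baremetal node set <node-uuid> \
        --driver-info snmp_driver=raritan_pdu2 \
        --driver-info snmp_address=192.0.2.10 \
        --driver-info snmp_outlet=6
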
diff --git a/releasenotes/notes/adds-kickstart-auto-url-in-template-9f716c244adff159.yaml b/releasenotes/notes/adds-kickstart-auto-url-in-template-9f716c244adff159.yaml
new file mode 100644
index 000000000..df7e0bdcf
--- /dev/null
+++ b/releasenotes/notes/adds-kickstart-auto-url-in-template-9f716c244adff159.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds an automatic switch to ``url`` for the kickstart template when
+ the source is a URL path as opposed to a ``stage2`` ramdisk.
diff --git a/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml b/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml
new file mode 100644
index 000000000..59d306c5d
--- /dev/null
+++ b/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Adds a configuration option, ``[anaconda]insecure_heartbeat``, to allow
+ TLS certificate validation to be disabled in the ``anaconda``
+ deployment interface, which is needed for continuous integration to
+ run without substantial substrate image customization.
+ This option is *not* advised for any production usage.
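
A minimal sketch of how this option might look in ``ironic.conf`` for a CI environment; the option name is taken from the note above, and the section layout follows standard oslo.config conventions:

    [anaconda]
    # CI only: skip TLS certificate validation in the anaconda deploy
    # interface. Never enable this in production.
    insecure_heartbeat = True
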
diff --git a/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml b/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml
new file mode 100644
index 000000000..5eb8dd449
--- /dev/null
+++ b/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ Adds a concurrency limiter for number of nodes in states related to
+ *Cleaning* and *Provisioning* operations across the ironic deployment.
+ These settings default to a maximum number of concurrent deployments to
+ ``250`` and a maximum number of concurrent deletes and cleaning operations
+ to ``50``. These settings can be tuned using
+ ``[conductor]max_concurrent_deploy`` and
+ ``[conductor]max_concurrent_clean``, respectively.
+ The defaults should generally be good for most operators in most cases.
+ Large scale operators should evaluate the defaults and tune them
+ appropriately, because this feature is a security mechanism and cannot
+ be disabled.
+upgrade:
+ - |
+ Large scale operators should be aware that a new feature, referred to as
+ "Concurrent Action Limit" was introduced as a security mechanism to
+ provide a means to prevent attackers, or faulty scripts, from potentially
+ causing irreparable harm to an environment. This feature cannot be
+ disabled, and operators are encouraged to tune the new settings
+ ``[conductor]max_concurrent_deploy`` and
+ ``[conductor]max_concurrent_clean`` to match the needs of their
+ environment.
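
As a rough illustration of the tuning described above, an operator would adjust the limits in ``ironic.conf``; the values shown are examples, not recommendations:

    [conductor]
    # Maximum number of nodes in deployment-related states at once.
    max_concurrent_deploy = 500
    # Maximum number of nodes in cleaning/deletion-related states at once.
    max_concurrent_clean = 100
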
diff --git a/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml b/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml
new file mode 100644
index 000000000..10d270a45
--- /dev/null
+++ b/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ Fixes an issue where image information retrieval would fail when a
+ path was supplied while using the ``anaconda`` deploy interface,
+ as HTTP ``HEAD`` requests on a URL path have no ``Content-Length``.
+ We now consider if a path is used prior to attempting to collect
+ additional configuration data from what is normally expected to
+ be Glance.
+ - |
+ Fixes an issue where the fallback to a default kickstart template
+ value would result in an error indicating
+ "Scheme-less image href is not a UUID".
+ This was because the code handling the fallback to the default
+ did not explicitly mark the value as a file URL before saving it.
diff --git a/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml b/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml
new file mode 100644
index 000000000..1951245c1
--- /dev/null
+++ b/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds new clean steps ``create_csr`` and ``add_https_certificate``
+ to the ``ilo`` and ``ilo5`` hardware types, which allow users to
+ create a Certificate Signing Request (CSR) and add a signed HTTPS
+ certificate to the iLO.
diff --git a/releasenotes/notes/deprecate-syslinux-support-98d327c67607fc8e.yaml b/releasenotes/notes/deprecate-syslinux-support-98d327c67607fc8e.yaml
index 670047a82..9543f9d84 100644
--- a/releasenotes/notes/deprecate-syslinux-support-98d327c67607fc8e.yaml
+++ b/releasenotes/notes/deprecate-syslinux-support-98d327c67607fc8e.yaml
@@ -23,4 +23,4 @@ deprecations:
- Deprecation of ``pxelinux``, as a result of the deprecation of ``syslinux``,
does ultimately mean the default for the ``pxe`` boot_interface to carry
defaults for the use of grub based network booting, specifically for
- for operators who are unable to use iPXE.
+ operators who are unable to use iPXE.
diff --git a/releasenotes/notes/fast-track-bios-fa9ae685c151dd24.yaml b/releasenotes/notes/fast-track-bios-fa9ae685c151dd24.yaml
new file mode 100644
index 000000000..b4a8004f7
--- /dev/null
+++ b/releasenotes/notes/fast-track-bios-fa9ae685c151dd24.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes rebooting into the agent after changing BIOS settings in fast-track
+ mode with the ``redfish-virtual-media`` boot interface. Previously, the ISO
+ would not be configured.
diff --git a/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml b/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml
new file mode 100644
index 000000000..f7769afc1
--- /dev/null
+++ b/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Fixes an issue where cleaning operations could fail in a way that was
+ not easily recoverable when the pre-cleaning network interface
+ configuration was validated, yet contained invalid configuration.
+ Now Ironic properly captures the error and exits from cleaning in a
+ state which allows for cleaning to be retried.
diff --git a/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml b/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml
new file mode 100644
index 000000000..bf476dd63
--- /dev/null
+++ b/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes the ``idrac-redfish`` RAID ``delete_configuration`` step to convert
+ PERC 9 and PERC 10 controllers to RAID mode if they are not already in it.
diff --git a/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml b/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml
new file mode 100644
index 000000000..586ea6b82
--- /dev/null
+++ b/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml
@@ -0,0 +1,26 @@
+---
+fixes:
+ - |
+ Fixes the default boot interface order for the ``ilo`` hardware type
+ where previously it would prefer ``pxe`` over ``ipxe``. This created
+ inconsistencies for operators using multiple hardware types, where
+ both interfaces were enabled in the deployment.
+upgrade:
+ - |
+ Operators who are upgrading should be aware that a bug was discovered
+ with the automatic selection of ``boot_interface`` for users of the
+ ``ilo`` and ``ilo5`` hardware types. This was an inconsistency,
+ resulting in ``pxe`` being selected instead of ``ipxe`` if both
+ boot interfaces were enabled. Depending on the local configuration,
+ this may, or may not have happened and will remain static on
+ preexisting baremetal nodes. Some users may have been relying
+ upon this incorrect behavior by having misaligned defaults by trying
+ to use the ``pxe`` interface for ``ipxe``. Users wishing to continue
+ this usage as it was previously will need to explicitly set a
+ ``boot_interface`` value of either ``pxe`` or ``ilo-ipxe``,
+ depending on the local configuration. Most operators have leveraged
+ the default examples, and thus did not explicitly encounter this
+ condition. Operators explicitly wishing to use ``pxe`` boot interfaces
+ with the ``ipxe`` templates and defaults will need to continue to
+ leverage override configurations in their ``ironic.conf`` file.
diff --git a/releasenotes/notes/fix-pxe-glance-lookup-anaconda-86fe616c6286ec08.yaml b/releasenotes/notes/fix-pxe-glance-lookup-anaconda-86fe616c6286ec08.yaml
new file mode 100644
index 000000000..961b49cb0
--- /dev/null
+++ b/releasenotes/notes/fix-pxe-glance-lookup-anaconda-86fe616c6286ec08.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes an issue in the ``anaconda`` deployment interface where PXE argument
+ processing and preparation was erroneously directly connecting to Glance,
+ potentially leading to an exception in the standalone use case.
diff --git a/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml b/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml
new file mode 100644
index 000000000..fcfc515e4
--- /dev/null
+++ b/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Provides vendor passthru methods for ``ilo`` and ``ilo5`` hardware types
+ to create, delete and get subscriptions for BMC events. These methods are
+ supported for ``HPE ProLiant Gen10`` and ``HPE ProLiant Gen10 Plus``
+ servers.
diff --git a/releasenotes/notes/irmc-add-certification-file-option-34e7a0062c768e58.yaml b/releasenotes/notes/irmc-add-certification-file-option-34e7a0062c768e58.yaml
new file mode 100644
index 000000000..14e20864b
--- /dev/null
+++ b/releasenotes/notes/irmc-add-certification-file-option-34e7a0062c768e58.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Adds a ``driver_info/irmc_verify_ca`` option to specify a certificate
+ verification file. The default value of ``driver_info/irmc_verify_ca``
+ is ``True``.
+security:
+ - |
+ Modifies the ``irmc`` hardware type to include a capability to control
+ enforcement of HTTPS certificate verification. By default this is enforced.
+ python-scciclient >= 0.12.0 is required.
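
A hedged example of setting this on a node with the baremetal CLI; whether a boolean or a path to a CA bundle is supplied depends on the deployment, and the path below is purely illustrative:

    baremetal node set <node-uuid> \
        --driver-info irmc_verify_ca=/etc/ironic/irmc-ca.pem
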
diff --git a/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml b/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml
new file mode 100644
index 000000000..75c0a6c50
--- /dev/null
+++ b/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes API error messages with jsonschema>=4.8. A possible root cause is
+ now detected for generic schema errors.
diff --git a/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml b/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml
new file mode 100644
index 000000000..f09421593
--- /dev/null
+++ b/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml
@@ -0,0 +1,10 @@
+---
+other:
+ - |
+ The maximum disk erasure concurrency setting,
+ ``[deploy]disk_erasure_concurrency``, has been increased to 4.
+ Previously, this was kept at 1 in order to maintain continuity of
+ experience, but operators have not reported any issues with increased
+ concurrency, and as such we feel comfortable enabling concurrent
+ disk erasure/cleaning upstream. This setting applies to the ``erase_devices`` clean
+ step.
diff --git a/releasenotes/notes/no-netboot-d08f46c12edabd35.yaml b/releasenotes/notes/no-netboot-d08f46c12edabd35.yaml
new file mode 100644
index 000000000..2427192c9
--- /dev/null
+++ b/releasenotes/notes/no-netboot-d08f46c12edabd35.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The deprecated support for instance network booting (not to be confused
+ with the ``ramdisk`` deploy, iSCSI boot or Anaconda deploy) has been
+ removed. The ``boot_option`` capability is no longer supported.
diff --git a/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml b/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml
new file mode 100644
index 000000000..b405dddb3
--- /dev/null
+++ b/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml
@@ -0,0 +1,27 @@
+---
+features:
+ - |
+ Adds the capability for a project scoped ``admin`` user to
+ create nodes in Ironic, which are then manageable by the project scoped
+ ``admin`` user. Effectively, this is self-service Bare Metal as a Service;
+ however, more advanced fields such as drivers and chassis are not available
+ to these users. This is controlled through an auto-population of the
+ Node ``owner`` field, and can be controlled through the
+ ``[api]project_admin_can_manage_own_nodes`` setting, which defaults to
+ ``True``, and the new policy ``baremetal:node:create:self_owned_node``.
+ - |
+ Adds the capability for a project scoped ``admin`` user to
+ delete nodes from Ironic which their ``project`` owns. This can be
+ controlled through the ``[api]project_admin_can_manage_own_nodes``
+ setting, which defaults to ``True``, as well as the
+ ``baremetal:node:delete:self_owned_node`` policy.
+security:
+ - |
+ This release contains an improvement which, by default, allows users to
+ create and delete baremetal nodes inside their own project. This can be
+ disabled using the ``[api]project_admin_can_manage_own_nodes`` setting.
+upgrade:
+ - |
+ The API version has been increased to ``1.80`` in order to signify
+ the addition of additional Role Based Access Control capabilities
+ around node creation and deletion.
\ No newline at end of file
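
A sketch of the knobs described above; the ``ironic.conf`` option and policy names come from this note, while the ``policy.yaml`` file name and the ``"!"`` (never allow) check string are standard oslo.policy conventions shown only for illustration:

    # ironic.conf
    [api]
    project_admin_can_manage_own_nodes = True

    # policy.yaml -- opt out of self-owned node creation, if desired
    "baremetal:node:create:self_owned_node": "!"
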
diff --git a/releasenotes/notes/override-external_http_url-per-node-f5423b00b373e528.yaml b/releasenotes/notes/override-external_http_url-per-node-f5423b00b373e528.yaml
new file mode 100644
index 000000000..93e661ba7
--- /dev/null
+++ b/releasenotes/notes/override-external_http_url-per-node-f5423b00b373e528.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Nodes using virtual media can now specify their own external URL.
+ This setting can be leveraged via the ``driver_info/external_http_url``
+ node setting.
+ When used, this setting overrides the ``[deploy]http_url`` and
+ ``[deploy]external_http_url`` settings in the configuration file.
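
A minimal example of the per-node override described above; the URL is a placeholder:

    baremetal node set <node-uuid> \
        --driver-info external_http_url=https://vmedia.example.com:8080
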
diff --git a/releasenotes/notes/prevent-pxe-retry-when-token-exists-a4f38f7da56c1397.yaml b/releasenotes/notes/prevent-pxe-retry-when-token-exists-a4f38f7da56c1397.yaml
index 5db6db6ec..62e7ae9ca 100644
--- a/releasenotes/notes/prevent-pxe-retry-when-token-exists-a4f38f7da56c1397.yaml
+++ b/releasenotes/notes/prevent-pxe-retry-when-token-exists-a4f38f7da56c1397.yaml
@@ -1,7 +1,7 @@
---
fixes:
- |
- Fixes a race condition in PXE initialization where logic to retry
+ Fixes a race condition in PXE initialization where the logic to retry
what we suspect as potentially failed PXE boot operations was not
consulting if an ``agent token`` had been established, which is the
very first step in agent initialization.
diff --git a/releasenotes/notes/ramdisk-deploy-384a38c3c96059dd.yaml b/releasenotes/notes/ramdisk-deploy-384a38c3c96059dd.yaml
new file mode 100644
index 000000000..0fb33937e
--- /dev/null
+++ b/releasenotes/notes/ramdisk-deploy-384a38c3c96059dd.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The ``ramdisk`` deploy interface is now enabled by default. When the
+ default ``direct`` deploy is also enabled, the ``ramdisk`` deploy must be
+ explicitly requested on the node level.
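
A short example of explicitly requesting the ``ramdisk`` deploy interface on a node, as described above:

    baremetal node set <node-uuid> --deploy-interface ramdisk
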
diff --git a/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml b/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml
new file mode 100644
index 000000000..af48b88fa
--- /dev/null
+++ b/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue where the Redfish session cache would continue using an
+ old session when a password for a Redfish BMC was changed. Now the old
+ session will not be found in this case, and a new session will be created
+ with the latest credential information available.
diff --git a/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml b/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
index df9bef955..a829cbd97 100644
--- a/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
+++ b/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
@@ -1,8 +1,8 @@
---
fixes:
- |
- Resolved clear_job_queue and reset_idrac verify step failures which occur
- when the functionality is not supported by the iDRAC. When this condition
- is detected, the code in the step handles the exception and logs a warning
- and completes successfully in case of verification steps but fails in case
- of cleaning steps.
+ Resolved ``clear_job_queue`` and ``reset_idrac`` verify step failures which
+ occur when the functionality is not supported by the iDRAC. When this
+ condition is detected, the code in the step handles the exception and logs
+ a warning and completes successfully in case of verification steps but
+ fails in case of cleaning steps.
diff --git a/releasenotes/notes/suppress_chassis_not_found_error-99ee4b902d504ec7.yaml b/releasenotes/notes/suppress_chassis_not_found_error-99ee4b902d504ec7.yaml
new file mode 100644
index 000000000..bc55ec3a2
--- /dev/null
+++ b/releasenotes/notes/suppress_chassis_not_found_error-99ee4b902d504ec7.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Fixes an issue where an API user, when requesting a node list or single
+ node object, could get an error indicating that the request was bad as
+ the chassis was not found. This can occur when in-flight delete
+ operations are in progress on another thread. Instead of surfacing a
+ request breaking error, the API now suppresses the error and just
+ treats it as if there is no Chassis.
diff --git a/releasenotes/notes/version-foo-2eb39b768112547f.yaml b/releasenotes/notes/version-foo-2eb39b768112547f.yaml
new file mode 100644
index 000000000..bd0485863
--- /dev/null
+++ b/releasenotes/notes/version-foo-2eb39b768112547f.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes ``enable_netboot_fallback`` to cause the iPXE config to exit 0
+ when "sanboot --no-describe" fails, allowing the firmware to
+ move on to the next device in the boot order.
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 62f118945..d4d148d41 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -2,15 +2,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Ironic Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-04-04 18:36+0000\n"
+"POT-Creation-Date: 2022-09-06 22:51+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-12-19 01:42+0000\n"
+"PO-Revision-Date: 2022-09-05 10:29+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -27,7 +28,7 @@ msgid ""
msgstr ""
"\"Choooooo! Choooooo!\" The Train is now departing the station. The "
"OpenStack Bare Metal as a service team is proud to announce the release of "
-"Ironic 13.0.0. This release brings the long desired feature of software RAID "
+"Ironic 13.0.0. This release brings the long-desired feature of software RAID "
"configuration, Redfish virtual media boot support, sensor data improvements, "
"and numerous bug fixes. We hope you enjoy your ride on the OpenStack Ironic "
"Train."
@@ -74,6 +75,31 @@ msgstr ""
"hardware type to support new out-of-band inspection capabilities. If an "
"older version is used, the new capabilities will not be discovered."
+msgid "/v1/drivers/<driver name>/properties"
+msgstr "/v1/drivers/<driver name>/properties"
+
+msgid "/v1/nodes/<UUID or logical name>/states"
+msgstr "/v1/nodes/<UUID or logical name>/states"
+
+msgid ""
+"1.12 allows setting RAID properties for a node; however support for putting "
+"this configuration on a node is not yet implemented for in-tree drivers; "
+"this will be added in a future release."
+msgstr ""
+"1.12 allows setting RAID properties for a node; however support for putting "
+"this configuration on a node is not yet implemented for in-tree drivers; "
+"this will be added in a future release."
+
+msgid ""
+"1.13 adds a new 'abort' verb to the provision state API. This may be used to "
+"abort cleaning for nodes in the CLEANWAIT state."
+msgstr ""
+"1.13 adds a new 'abort' verb to the provision state API. This may be used to "
+"abort cleaning for nodes in the CLEANWAIT state."
+
+msgid "1.14 makes the following endpoints discoverable in the API:"
+msgstr "1.14 makes the following endpoints discoverable in the API:"
+
msgid "10.0.0"
msgstr "10.0.0"
@@ -86,8 +112,8 @@ msgstr "10.1.1"
msgid "10.1.10"
msgstr "10.1.10"
-msgid "10.1.10-10"
-msgstr "10.1.10-10"
+msgid "10.1.10-11"
+msgstr "10.1.10-11"
msgid "10.1.2"
msgstr "10.1.2"
@@ -128,6 +154,9 @@ msgstr "11.1.3"
msgid "11.1.4"
msgstr "11.1.4"
+msgid "11.1.4-12"
+msgstr "11.1.4-12"
+
msgid "12.0.0"
msgstr "12.0.0"
@@ -152,6 +181,9 @@ msgstr "12.1.5"
msgid "12.1.6"
msgstr "12.1.6"
+msgid "12.1.6-3"
+msgstr "12.1.6-3"
+
msgid "12.2.0"
msgstr "12.2.0"
@@ -176,6 +208,12 @@ msgstr "13.0.5"
msgid "13.0.6"
msgstr "13.0.6"
+msgid "13.0.7"
+msgstr "13.0.7"
+
+msgid "13.0.7-25"
+msgstr "13.0.7-25"
+
msgid "14.0.0"
msgstr "14.0.0"
@@ -185,6 +223,12 @@ msgstr "15.0.0"
msgid "15.0.1"
msgstr "15.0.1"
+msgid "15.0.2"
+msgstr "15.0.2"
+
+msgid "15.0.2-17"
+msgstr "15.0.2-17"
+
msgid "15.1.0"
msgstr "15.1.0"
@@ -200,6 +244,72 @@ msgstr "16.0.1"
msgid "16.0.2"
msgstr "16.0.2"
+msgid "16.0.3"
+msgstr "16.0.3"
+
+msgid "16.0.4"
+msgstr "16.0.4"
+
+msgid "16.0.5"
+msgstr "16.0.5"
+
+msgid "16.1.0"
+msgstr "16.1.0"
+
+msgid "16.2.0"
+msgstr "16.2.0"
+
+msgid "17.0.0"
+msgstr "17.0.0"
+
+msgid "17.0.2"
+msgstr "17.0.2"
+
+msgid "17.0.3"
+msgstr "17.0.3"
+
+msgid "17.0.4"
+msgstr "17.0.4"
+
+msgid "17.0.4-34"
+msgstr "17.0.4-34"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "18.2.0"
+msgstr "18.2.0"
+
+msgid "18.2.1"
+msgstr "18.2.1"
+
+msgid "18.2.1-27"
+msgstr "18.2.1-27"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "20.1.0"
+msgstr "20.1.0"
+
+msgid "20.1.0-24"
+msgstr "20.1.0-24"
+
+msgid "20.2.0"
+msgstr "20.2.0"
+
+msgid "21.0.0"
+msgstr "21.0.0"
+
+msgid "4.0.0 First semver release"
+msgstr "4.0.0 First semver release"
+
msgid "4.2.2"
msgstr "4.2.2"
@@ -477,6 +587,21 @@ msgstr ""
"``enroll``, ``inspecting``, and ``manageable`` states."
msgid ""
+"A new @passthru decorator was introduced to the driver API in a previous "
+"release. In this release, support for vendor_passthru and "
+"driver_vendor_passthru methods has been removed. All in-tree drivers have "
+"been updated. Any out of tree drivers which did not update to the @passthru "
+"decorator during the previous release will need to do so to be compatible "
+"with this release."
+msgstr ""
+"A new @passthru decorator was introduced to the driver API in a previous "
+"release. In this release, support for vendor_passthru and "
+"driver_vendor_passthru methods has been removed. All in-tree drivers have "
+"been updated. Any out-of-tree drivers which did not update to the @passthru "
+"decorator during the previous release will need to do so to be compatible "
+"with this release."
+
+msgid ""
"A new WSGI application script ``ironic-api-wsgi`` is now available. It is "
"auto-generated by ``pbr`` and provides the ability to serve the bare metal "
"API using a WSGI server (for example Nginx and uWSGI or Apache with "
@@ -488,6 +613,13 @@ msgstr ""
"mod_wsgi)."
msgid ""
+"A new class ``ironic.drivers.modules.agent.CustomAgentDeploy`` can be used "
+"as a base class for deploy interfaces based on ironic-python-agent."
+msgstr ""
+"A new class ``ironic.drivers.modules.agent.CustomAgentDeploy`` can be used "
+"as a base class for deploying interfaces based on ironic-python-agent."
+
+msgid ""
"A new configuration option ``[agent]require_tls`` allows rejecting ramdisk "
"callback URLs that don't use the ``https://`` schema."
msgstr ""
@@ -730,6 +862,9 @@ msgstr ""
"Ironic to wait for Neutron port operations until we have a mechanism for "
"synchronising events with Neutron. Set to 0 by default."
+msgid "Add OCS Driver"
+msgstr "Add OCS Driver"
+
msgid "Add UCS Driver"
msgstr "Add UCS Driver"
@@ -850,6 +985,9 @@ msgstr ""
"lock up for a quite long time and ironic will not allow to perform any "
"operations with it."
+msgid "Added CORS support"
+msgstr "Added CORS support"
+
msgid ""
"Added configdrive support for whole disk images for iSCSI based deploy. This "
"will work for UEFI only or BIOS only images. It will not work for hybrid "
@@ -1735,9 +1873,50 @@ msgid ""
msgstr ""
"Adds inspection support for the `agent_ipmitool` and `agent_ssh` drivers."
+msgid ""
+"After a deprecation period, the scripts and support for migrating from the "
+"old Nova \"baremetal\" driver to the new Nova \"ironic\" driver have been "
+"removed from Ironic's tree."
+msgstr ""
+"After a deprecation period, the scripts and support for migrating from the "
+"old Nova \"baremetal\" driver to the new Nova \"ironic\" driver have been "
+"removed from Ironic's tree."
+
+msgid ""
+"Beginning with API v1.11, newly created Nodes begin in the ENROLL state, and "
+"must be \"managed\" and \"provided\" before they are made available for "
+"provisioning. API clients must be updated to handle the new workflow when "
+"they begin sending the X-OpenStack-Ironic-API-Version header with a value >= "
+"1.11."
+msgstr ""
+"Beginning with API v1.11, newly created Nodes begin in the ENROLL state and "
+"must be \"managed\" and \"provided\" before they are made available for "
+"provisioning. API clients must be updated to handle the new workflow when "
+"they begin sending the X-OpenStack-Ironic-API-Version header with a value >= "
+"1.11."
+
msgid "Current Series Release Notes"
msgstr "Current Series Release Notes"
+msgid "Deprecated the 'parallel' option to periodic task decorator"
+msgstr "Deprecated the 'parallel' option to periodic task decorator"
+
+msgid ""
+"Drivers may optionally add a new BootInterface. This is merely a refactoring "
+"of the Driver API to support future improvements."
+msgstr ""
+"Drivers may optionally add a new BootInterface. This is merely a refactoring "
+"of the Driver API to support future improvements."
+
+msgid "Implemented a new Boot interface for drivers"
+msgstr "Implemented a new Boot interface for drivers"
+
+msgid "Introduce new BootInterface to the Driver API"
+msgstr "Introduce new BootInterface to the Driver API"
+
+msgid "Migrations from Nova \"baremetal\" have been removed"
+msgstr "Migrations from Nova \"baremetal\" have been removed"
+
msgid "Mitaka Series (4.3.0 - 5.1.x) Release Notes"
msgstr "Mitaka Series (4.3.0 - 5.1.x) Release Notes"
@@ -1767,9 +1946,83 @@ msgstr ""
msgid "Queens Series (9.2.0 - 10.1.x) Release Notes"
msgstr "Queens Series (9.2.0 - 10.1.x) Release Notes"
+msgid "Raised API version to 1.11"
+msgstr "Raised API version to 1.11"
+
+msgid "Raised API version to 1.14"
+msgstr "Raised API version to 1.14"
+
+msgid "Removal of deprecated vendor driver methods"
+msgstr "Removal of deprecated vendor driver methods"
+
+msgid "Removed deprecated 'admin_api' policy rule"
+msgstr "Removed deprecated 'admin_api' policy rule"
+
+msgid "Rocky Series (11.0.0 - 11.1.x) Release Notes"
+msgstr "Rocky Series (11.0.0 - 11.1.x) Release Notes"
+
+msgid "Several hardware drivers have been added or enhanced"
+msgstr "Several hardware drivers have been added or enhanced"
+
+msgid "Stein Series (12.0.0 - 12.1.x) Release Notes"
+msgstr "Stein Series (12.0.0 - 12.1.x) Release Notes"
+
msgid "Support for the new ENROLL workflow during Node creation"
msgstr "Support for the new ENROLL workflow during Node creation"
+msgid ""
+"The Ironic API now has support for CORS requests, that may be used by, for "
+"example, web browser-based clients. This is configured in the [cors] section "
+"of ironic.conf."
+msgstr ""
+"The Ironic API now has support for CORS requests, that may be used by, for "
+"example, web browser-based clients. This is configured in the [cors] section "
+"of ironic.conf."
+
+msgid ""
+"This change enhances the driver interface for driver authors, and should not "
+"affect users of Ironic, by splitting control of booting a server from the "
+"DeployInterface. The BootInterface is responsible for booting an image on a "
+"server, while the DeployInterface is responsible for deploying a tenant "
+"image to a server."
+msgstr ""
+"This change enhances the driver interface for driver authors, and should not "
+"affect users of Ironic, by splitting control of booting a server from the "
+"DeployInterface. The BootInterface is responsible for booting an image on a "
+"server, while the DeployInterface is responsible for deploying a tenant "
+"image to a server."
+
+msgid ""
+"This is the first semver-versioned release of Ironic, created during the "
+"OpenStack \"Liberty\" development cycle. It marks a pivot in our versioning "
+"schema from date-based versioning; the previous released version was 2015.1. "
+"Full release details are available on Launchpad: https://launchpad.net/"
+"ironic/liberty/4.0.0."
+msgstr ""
+"This is the first semver-versioned release of Ironic, created during the "
+"OpenStack \"Liberty\" development cycle. It marks a pivot in our versioning "
+"schema from date-based versioning; the previously released version was "
+"2015.1. Full release details are available on Launchpad: https://launchpad."
+"net/ironic/liberty/4.0.0."
+
+msgid "Train Series (12.2.0 - 13.0.x) Release Notes"
+msgstr "Train Series (12.2.0 - 13.0.x) Release Notes"
+
+msgid "Ussuri Series (14.0.0 - 15.0.x) Release Notes"
+msgstr "Ussuri Series (14.0.0 - 15.0.x) Release Notes"
+
+msgid "Victoria Series (15.1.0 - 16.0.x) Release Notes"
+msgstr "Victoria Series (15.1.0 - 16.0.x) Release Notes"
+
+msgid "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
+msgstr "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
+
+msgid "Xena Series (18.0.0 - 18.2.x) Release Notes"
+msgstr "Xena Series (18.0.0 - 18.2.x) Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
msgid "ipmitool driver supports IPMI v1.5"
msgstr "ipmitool driver supports IPMI v1.5"
@@ -1779,3 +2032,23 @@ msgid ""
msgstr ""
"pxe_ilo driver now supports UEFI Secure Boot (previous releases of the iLO "
"driver only supported this for agent_ilo and iscsi_ilo)"
+
+msgid ""
+"v1.10 fixes Node logical names to support all `RFC 3986`_ unreserved "
+"characters"
+msgstr ""
+"v1.10 fixes Node logical names to support all `RFC 3986`_ unreserved "
+"characters"
+
+msgid ""
+"v1.11 changes the default state of newly created Nodes from AVAILABLE to "
+"ENROLL"
+msgstr ""
+"v1.11 changes the default state of newly created Nodes from AVAILABLE to "
+"ENROLL"
+
+msgid "v1.7 exposes a new 'clean_step' property on the Node resource."
+msgstr "v1.7 exposes a new 'clean_step' property on the Node resource."
+
+msgid "v1.8 and v1.9 improve query and filter support"
+msgstr "v1.8 and v1.9 improve query and filter support"
diff --git a/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po
deleted file mode 100644
index 0f177ed00..000000000
--- a/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po
+++ /dev/null
@@ -1,159 +0,0 @@
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-# Akihiro Motoki <amotoki@gmail.com>, 2016. #zanata
-# Akihito INOH <aki-inou@rs.jp.nec.com>, 2018. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: ironic\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2018-08-09 13:46+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-02-15 11:45+0000\n"
-"Last-Translator: Akihito INOH <aki-inou@rs.jp.nec.com>\n"
-"Language-Team: Japanese\n"
-"Language: ja\n"
-"X-Generator: Zanata 4.3.3\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-
-msgid ""
-"\"Port group\" support allows users to take advantage of bonded network "
-"interfaces."
-msgstr ""
-"\"Port group\" のサポートにより、ユーザーはボンディングされたネットワークイン"
-"ターフェースが利用できるようになります。"
-
-msgid "10.0.0"
-msgstr "10.0.0"
-
-msgid "10.1.0"
-msgstr "10.1.0"
-
-msgid "4.2.2"
-msgstr "4.2.2"
-
-msgid "4.2.3"
-msgstr "4.2.3"
-
-msgid "4.2.4"
-msgstr "4.2.4"
-
-msgid "4.2.5"
-msgstr "4.2.5"
-
-msgid "4.3.0"
-msgstr "4.3.0"
-
-msgid "443, 80"
-msgstr "443, 80"
-
-msgid "5.0.0"
-msgstr "5.0.0"
-
-msgid "5.1.0"
-msgstr "5.1.0"
-
-msgid "5.1.1"
-msgstr "5.1.1"
-
-msgid "5.1.2"
-msgstr "5.1.2"
-
-msgid "5.1.3"
-msgstr "5.1.3"
-
-msgid "6.0.0"
-msgstr "6.0.0"
-
-msgid "6.1.0"
-msgstr "6.1.0"
-
-msgid "6.2.0"
-msgstr "6.2.0"
-
-msgid "6.2.2"
-msgstr "6.2.2"
-
-msgid "6.2.3"
-msgstr "6.2.3"
-
-msgid "6.2.4"
-msgstr "6.2.4"
-
-msgid "6.3.0"
-msgstr "6.3.0"
-
-msgid "7.0.0"
-msgstr "7.0.0"
-
-msgid "7.0.1"
-msgstr "7.0.1"
-
-msgid "7.0.2"
-msgstr "7.0.2"
-
-msgid "7.0.3"
-msgstr "7.0.3"
-
-msgid "7.0.4"
-msgstr "7.0.4"
-
-msgid "8.0.0"
-msgstr "8.0.0"
-
-msgid "9.0.0"
-msgstr "9.0.0"
-
-msgid "9.0.1"
-msgstr "9.0.1"
-
-msgid "9.1.0"
-msgstr "9.1.0"
-
-msgid "9.1.1"
-msgstr "9.1.1"
-
-msgid "9.1.2"
-msgstr "9.1.2"
-
-msgid "9.1.3"
-msgstr "9.1.3"
-
-msgid "9.2.0"
-msgstr "9.2.0"
-
-msgid ""
-"A few major changes are worth mentioning. This is not an exhaustive list:"
-msgstr ""
-"いくつかの主要な変更がありました。全てではありませんが以下にリストを示しま"
-"す。"
-
-msgid "A few major changes since 9.1.x (Pike) are worth mentioning:"
-msgstr "9.1.x (Pike) からの主要な変更がいくつかありました。"
-
-msgid "Bug Fixes"
-msgstr "バグ修正"
-
-msgid "Current Series Release Notes"
-msgstr "開発中バージョンのリリースノート"
-
-msgid "Deprecation Notes"
-msgstr "廃止予定の機能"
-
-msgid "Known Issues"
-msgstr "既知の問題"
-
-msgid "New Features"
-msgstr "新機能"
-
-msgid "Option"
-msgstr "オプション"
-
-msgid "Other Notes"
-msgstr "その他の注意点"
-
-msgid "Security Issues"
-msgstr "セキュリティー上の問題"
-
-msgid "Upgrade Notes"
-msgstr "アップグレード時の注意"
diff --git a/requirements.txt b/requirements.txt
index 2ac3e8348..ae8e14f39 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -38,7 +38,7 @@ requests>=2.18.0 # Apache-2.0
rfc3986>=1.2.0 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
Jinja2>=3.0.0 # BSD License (3 clause)
-keystonemiddleware>=4.17.0 # Apache-2.0
+keystonemiddleware>=9.5.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
tenacity>=6.2.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
@@ -47,4 +47,4 @@ psutil>=3.2.2 # BSD
futurist>=1.2.0 # Apache-2.0
tooz>=2.7.0 # Apache-2.0
openstacksdk>=0.48.0 # Apache-2.0
-sushy>=3.10.0
+sushy>=4.3.0
diff --git a/tools/config/ironic-config-generator.conf b/tools/config/ironic-config-generator.conf
index 5c01f82dd..eb7e06410 100644
--- a/tools/config/ironic-config-generator.conf
+++ b/tools/config/ironic-config-generator.conf
@@ -23,4 +23,5 @@ namespace = oslo.service.service
namespace = oslo.service.periodic_task
namespace = oslo.service.sslutils
namespace = osprofiler
+namespace = keystonemiddleware.audit
namespace = keystonemiddleware.auth_token
diff --git a/tox.ini b/tox.ini
index cffaa8f1d..247e819a4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,7 +11,6 @@ setenv = VIRTUAL_ENV={envdir}
PYTHONDONTWRITEBYTECODE = 1
LANGUAGE=en_US
LC_ALL=en_US.UTF-8
- PYTHONWARNINGS=default::DeprecationWarning
deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index 9fd0e601d..9d7435bd3 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -217,6 +217,48 @@
s-proxy: False
- job:
+ name: ironic-standalone-anaconda
+ parent: ironic-standalone-redfish
+ description:
+ Test ironic with the anaconda deployment interface.
+ Test also uses Redfish.
+ required-projects:
+ - opendev.org/openstack/sushy-tools
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^install-guide/.*$
+ - ^ironic/locale/.*$
+ - ^ironic/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^test-requirements.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ vars:
+ tempest_test_regex: BaremetalRedfishIPxeAnacondaNoGlance
+ tempest_test_timeout: 4800
+ tempest_concurrency: 2
+ devstack_localrc:
+ IRONIC_ENABLED_DEPLOY_INTERFACES: "anaconda"
+ IRONIC_VM_COUNT: 2
+ IRONIC_VM_VOLUME_COUNT: 1
+ IRONIC_VM_SPECS_RAM: 3192
+ IRONIC_VM_SPECS_CPU: 3
+ IRONIC_ENFORCE_SCOPE: True
+ # We're using a lot of disk space in this job. Some testing nodes have
+ # a small root partition, so use /opt which is mounted from a bigger
+ # ephemeral partition on such nodes
+ LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
+ IRONIC_ANACONDA_IMAGE_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
+ IRONIC_ANACONDA_KERNEL_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/vmlinuz
+ IRONIC_ANACONDA_RAMDISK_REF: http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/initrd.img
+ IRONIC_ANACONDA_INSECURE_HEARTBEAT: True
+ IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT: 3600
+ IRONIC_PXE_BOOT_RETRY_TIMEOUT: 3600
+
+- job:
name: ironic-tempest-bios-redfish-pxe
description: "Deploy ironic node over PXE using BIOS boot mode"
parent: ironic-tempest-uefi-redfish-vmedia
@@ -240,17 +282,6 @@
s-object: False
s-proxy: False
-# TODO(dtantsur): remove when sushy-tools no longer uses it.
-- job:
- name: ironic-tempest-bios-redfish-netboot
- description: "Deploy ironic node over PXE using BIOS boot mode"
- parent: ironic-tempest-bios-redfish-pxe
- vars:
- devstack_localrc:
- IRONIC_DEFAULT_BOOT_OPTION: netboot
- IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False
- IRONIC_VM_EPHEMERAL_DISK: 1
-
- job:
name: ironic-tempest-uefi-redfish-vmedia
description: "Deploy ironic node over Redfish virtual media using UEFI boot mode"
@@ -519,7 +550,6 @@
IRONIC_BAREMETAL_BASIC_OPS: True
IRONIC_BUILD_DEPLOY_RAMDISK: False
IRONIC_CALLBACK_TIMEOUT: 600
- IRONIC_DEFAULT_BOOT_OPTION: local
IRONIC_DEPLOY_DRIVER: ipmi
IRONIC_ENABLED_NETWORK_INTERFACES: flat,neutron
IRONIC_INSPECTOR_BUILD_RAMDISK: False
@@ -609,7 +639,6 @@
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_BAREMETAL_BASIC_OPS: True
IRONIC_DEPLOY_DRIVER: ipmi
- IRONIC_DEFAULT_BOOT_OPTION: local
IRONIC_ENABLED_NETWORK_INTERFACES: flat,neutron
IRONIC_NETWORK_INTERFACE: neutron
IRONIC_PROVISION_NETWORK_NAME: ironic-provision
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 8b821f816..586675f87 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -45,6 +45,8 @@
voting: false
- ironic-tempest-ipxe-ipv6:
voting: false
+ - ironic-standalone-anaconda:
+ voting: false
- ironic-inspector-tempest-rbac-scope-enforced:
voting: false
- bifrost-integration-tinyipa-ubuntu-focal: