author      Zuul <zuul@review.opendev.org>  2022-01-12 14:10:28 +0000
committer   Gerrit Code Review <review@openstack.org>  2022-01-12 14:10:28 +0000
commit      cb25103198cd2349d87a88a7a5aba92fcda50f61 (patch)
tree        fa41ea242de92441cb37e18c05abd32a99411250 /ironic
parent      fc4702439aaf074db6c8ce98b646f89e1ec22203 (diff)
parent      6fed0964a5cd913264165ca8c218a9e26b913ab2 (diff)
download    ironic-cb25103198cd2349d87a88a7a5aba92fcda50f61.tar.gz
Merge "Fix Redfish RAID deploy steps"
Diffstat (limited to 'ironic')
-rw-r--r--  ironic/drivers/modules/redfish/raid.py | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index 6e63e54f7..aa4294497 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -1010,8 +1010,8 @@ class RedfishRAID(base.RAIDInterface):
@periodics.node_periodic(
purpose='checking async RAID config failed',
spacing=CONF.redfish.raid_config_fail_interval,
- filters={'reserved': False, 'provision_state': states.CLEANFAIL,
- 'maintenance': True},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANFAIL, states.DEPLOYFAIL}, 'maintenance': True},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
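Before this change the failure-check periodic only matched nodes stuck in CLEANFAIL, so a RAID configuration that failed during deployment was never re-examined. Switching the filter to provision_state_in lets one periodic cover both failure states. A minimal sketch of that idea follows, using a simplified Node record and matcher rather than Ironic's real query layer; the state strings are placeholders standing in for the ironic.common.states constants.

    from dataclasses import dataclass, field

    # Placeholder values standing in for ironic.common.states constants.
    CLEANFAIL = 'clean failed'
    DEPLOYFAIL = 'deploy failed'

    @dataclass
    class Node:
        uuid: str
        provision_state: str
        reserved: bool = False
        maintenance: bool = True
        driver_internal_info: dict = field(default_factory=dict)

    def matches(node, filters):
        # provision_state_in takes a set of states instead of a single value,
        # which is what lets one periodic cover both CLEANFAIL and DEPLOYFAIL.
        if node.reserved != filters.get('reserved', node.reserved):
            return False
        if node.maintenance != filters.get('maintenance', node.maintenance):
            return False
        states_in = filters.get('provision_state_in')
        return states_in is None or node.provision_state in states_in

    filters = {'reserved': False, 'maintenance': True,
               'provision_state_in': {CLEANFAIL, DEPLOYFAIL}}
    nodes = [Node('a', CLEANFAIL), Node('b', DEPLOYFAIL), Node('c', 'active')]
    print([n.uuid for n in nodes if matches(n, filters)])   # -> ['a', 'b']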
@@ -1032,7 +1032,8 @@ class RedfishRAID(base.RAIDInterface):
@periodics.node_periodic(
purpose='checking async RAID config tasks',
spacing=CONF.redfish.raid_config_status_interval,
- filters={'reserved': False, 'provision_state': states.CLEANWAIT},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANWAIT, states.DEPLOYWAIT}},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
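The status-check periodic gets the same widening for the wait states, and in both periodics the predicate keeps the workload small: only nodes whose driver_internal_info still records pending raid_configs are picked up at all. A rough illustration of that pre-filtering, with hypothetical node records:

    # Hypothetical node records; only driver_internal_info matters here.
    nodes = [
        {'uuid': 'n1', 'provision_state': 'clean wait',
         'driver_internal_info': {'raid_configs': [{'task_monitor': '/TaskService/1'}]}},
        {'uuid': 'n2', 'provision_state': 'deploy wait',
         'driver_internal_info': {}},
    ]

    # Mirrors the predicate in the decorator: skip nodes with no pending RAID work.
    predicate = lambda n: n['driver_internal_info'].get('raid_configs')

    for node in nodes:
        if predicate(node):
            print('checking RAID tasks on', node['uuid'])   # only n1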
@@ -1110,4 +1111,7 @@ class RedfishRAID(base.RAIDInterface):
self._clear_raid_configs(node)
LOG.info('RAID configuration completed for node %(node)s',
{'node': node.uuid})
- manager_utils.notify_conductor_resume_clean(task)
+ if task.node.clean_step:
+ manager_utils.notify_conductor_resume_clean(task)
+ else:
+ manager_utils.notify_conductor_resume_deploy(task)
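The last hunk is the deploy-step fix itself: once the RAID configuration completes, the node has to be handed back to whichever workflow started it. Previously the code always resumed cleaning, which left deployments hanging. A minimal sketch of the decision, with plain dicts and hypothetical notify helpers standing in for manager_utils:

    def notify_conductor_resume_clean(task):
        print('resuming cleaning on', task['node']['uuid'])

    def notify_conductor_resume_deploy(task):
        print('resuming deployment on', task['node']['uuid'])

    def resume_after_raid(task):
        # A node in a cleaning workflow carries a current clean_step;
        # during deployment it does not, so fall back to resuming the deploy.
        if task['node'].get('clean_step'):
            notify_conductor_resume_clean(task)
        else:
            notify_conductor_resume_deploy(task)

    resume_after_raid({'node': {'uuid': 'n1',
                                'clean_step': {'step': 'apply_configuration'}}})
    resume_after_raid({'node': {'uuid': 'n2', 'clean_step': None}})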