summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAija Jauntēva <aija.jaunteva@dell.com>2022-05-20 06:00:45 -0400
committerAija Jauntēva <aija.jaunteva@dell.com>2022-07-19 10:43:17 -0400
commit249cbf2abaeaedefdea5c9eca9783572e9fc2cd5 (patch)
tree371118531cf8a1f03b64d9226834c7405602c2de
parentc19a2baf605853e8c5636b72404dea568b8ab94b (diff)
downloadironic-249cbf2abaeaedefdea5c9eca9783572e9fc2cd5.tar.gz
Fix Redfish RAID to update raid_config
Story: 2003514
Task: 41940

Conflicts:
    ironic/drivers/modules/redfish/raid.py
    ironic/tests/unit/drivers/modules/redfish/test_raid.py

Change-Id: I753c4b00c0a64bcdc89c9bc0afd46f1211f7847b
(cherry picked from commit 29364b7fb40327dc277c535e5b829133274ad420)
-rw-r--r--ironic/drivers/modules/redfish/raid.py31
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_raid.py127
-rw-r--r--ironic/tests/unit/drivers/third_party_driver_mock_specs.py3
-rw-r--r--ironic/tests/unit/drivers/third_party_driver_mocks.py3
-rw-r--r--releasenotes/notes/fix-redfish-raid-config-9e868c3e069475a1.yaml6
5 files changed, 165 insertions, 5 deletions
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index 014e49865..2d0b198f1 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -24,6 +24,7 @@ from oslo_utils import units
from ironic.common import exception
from ironic.common.i18n import _
+from ironic.common import raid as raid_common
from ironic.common import raid
from ironic.common import states
from ironic.conductor import task_manager
@@ -686,6 +687,30 @@ def create_virtual_disk(task, raid_controller, physical_disks, raid_level,
raise exception.RedfishError(error=exc)
+def update_raid_config(node):
+ """Updates node's raid_config field with current logical disks.
+
+ :param node: node for which to update the raid_config field
+ """
+ system = redfish_utils.get_system(node)
+ logical_disks = []
+ for stor in system.storage.get_members():
+ for vol in stor.volumes.get_members():
+ if vol.raid_type:
+ logical_disk = {
+ 'id': vol.identity,
+ 'name': vol.name,
+ 'controller': stor.identity,
+ 'size_gb': int(vol.capacity_bytes / units.Gi),
+ 'raid_level': next(
+ key for key, value in RAID_LEVELS.items()
+ if value['raid_type'] == vol.raid_type)
+ }
+ logical_disks.append(logical_disk)
+
+ raid_common.update_raid_info(node, {'logical_disks': logical_disks})
+
+
class RedfishRAID(base.RAIDInterface):
def __init__(self):
@@ -813,6 +838,8 @@ class RedfishRAID(base.RAIDInterface):
deploy_opts = deploy_utils.build_agent_options(task.node)
task.driver.boot.prepare_ramdisk(task, deploy_opts)
manager_utils.node_power_action(task, states.REBOOT)
+ else:
+ update_raid_config(node)
return self.post_create_configuration(
task, raid_configs, return_state=return_state)
@@ -842,6 +869,8 @@ class RedfishRAID(base.RAIDInterface):
deploy_opts = deploy_utils.build_agent_options(task.node)
task.driver.boot.prepare_ramdisk(task, deploy_opts)
manager_utils.node_power_action(task, states.REBOOT)
+ else:
+ update_raid_config(node)
return self.post_delete_configuration(
task, raid_configs, return_state=return_state)
@@ -972,6 +1001,7 @@ class RedfishRAID(base.RAIDInterface):
task.upgrade_lock()
self._clear_raid_configs(node)
+ update_raid_config(node)
except exception.NodeNotFound:
LOG.info('During _query_raid_config_failed, node '
@@ -1109,6 +1139,7 @@ class RedfishRAID(base.RAIDInterface):
self._clear_raid_configs(node)
LOG.info('RAID configuration completed for node %(node)s',
{'node': node.uuid})
+ update_raid_config(task.node)
if task.node.clean_step:
manager_utils.notify_conductor_resume_clean(task)
else:
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
index 31610c7c2..0f7cf320f 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -47,13 +47,16 @@ def _mock_drive(identity, block_size_bytes=None, capacity_bytes=None,
)
-def _mock_volume(identity, volume_type=None, raid_type=None):
+def _mock_volume(identity, volume_type=None, raid_type=None,
+ capacity_bytes=units.Gi):
volume = mock.MagicMock(
_path='/redfish/v1/Systems/1/Storage/1/Volumes/' + identity,
identity=identity,
volume_type=volume_type,
- raid_type=raid_type
+ raid_type=raid_type,
+ capacity_bytes=capacity_bytes
)
+ volume.name = 'Volume ' + identity
# Mocking Immediate that does not return anything
volume.delete.return_value = None
return volume
@@ -263,6 +266,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.mock_storage.volumes.create.assert_called_once_with(
expected_payload, apply_time=None
)
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual({}, task.node.raid_config)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -288,7 +293,12 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ created_volumes = [_mock_volume(
+ '1', raid_type=sushy.RAID_TYPE_RAID5,
+ capacity_bytes=100 * units.Gi)]
volumes = mock.MagicMock()
+ # Called after volumes created
+ volumes.get_members.return_value = created_volumes
op_apply_time_support = mock.MagicMock()
op_apply_time_support.mapped_supported_values = [
sushy.APPLY_TIME_IMMEDIATE, sushy.APPLY_TIME_ON_RESET]
@@ -326,6 +336,13 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertEqual(
+ [{'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '5',
+ 'size_gb': 100}],
+ task.node.raid_config['logical_disks'])
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -390,6 +407,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
mock_build_agent_options.assert_called_once_with(task.node)
self.assertEqual(mock_prepare_ramdisk.call_count, 1)
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual({}, task.node.raid_config)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -422,9 +441,16 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
}
]
}
+ created_volumes = [
+ _mock_volume('1', raid_type=sushy.RAID_TYPE_RAID5,
+ capacity_bytes=100 * units.Gi),
+ _mock_volume('2', raid_type=sushy.RAID_TYPE_RAID1,
+ capacity_bytes=500 * units.Gi)]
resource = mock.MagicMock(spec=['resource_name'])
resource.resource_name = 'volume'
self.mock_storage.volumes.create.return_value = resource
+ # Called after volumes created
+ self.mock_storage.volumes.get_members.return_value = created_volumes
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -466,6 +492,18 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.mock_storage.volumes.create.assert_any_call(
expected_payload2, apply_time=None
)
+ self.assertEqual(
+ [{'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '5',
+ 'size_gb': 100},
+ {'controller': 'RAID controller 1',
+ 'id': '2',
+ 'name': 'Volume 2',
+ 'raid_level': '1',
+ 'size_gb': 500}],
+ task.node.raid_config['logical_disks'])
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -548,6 +586,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(
expected_raid_configs,
task.node.driver_internal_info.get('raid_configs'))
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual({}, task.node.raid_config)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -604,6 +644,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.mock_storage.volumes.create.assert_called_once_with(
expected_payload, apply_time=None
)
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual({}, task.node.raid_config)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -662,6 +704,15 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
resource = mock.MagicMock(spec=['resource_name'])
resource.resource_name = 'volume'
self.mock_storage.volumes.create.return_value = resource
+ created_volumes = [
+ _mock_volume(
+ '1', raid_type=sushy.RAID_TYPE_RAID10,
+ capacity_bytes=50 * units.Gi),
+ _mock_volume(
+ '2', raid_type=sushy.RAID_TYPE_RAID5,
+ capacity_bytes=100 * units.Gi)]
+ # Called after volumes created
+ self.mock_storage.volumes.get_members.return_value = created_volumes
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -705,6 +756,19 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.mock_storage.volumes.create.assert_any_call(
expected_payload2, apply_time=None
)
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual(
+ [{'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '1+0',
+ 'size_gb': 50},
+ {'controller': 'RAID controller 1',
+ 'id': '2',
+ 'name': 'Volume 2',
+ 'raid_level': '5',
+ 'size_gb': 100}],
+ task.node.raid_config['logical_disks'])
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -891,6 +955,15 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
resource = mock.MagicMock(spec=['resource_name'])
resource.resource_name = 'volume'
self.mock_storage.volumes.create.return_value = resource
+ created_volumes = [
+ _mock_volume(
+ '1', raid_type=sushy.RAID_TYPE_RAID5,
+ capacity_bytes=100 * units.Gi),
+ _mock_volume(
+ '2', raid_type=sushy.RAID_TYPE_RAID1,
+ capacity_bytes=500 * units.Gi)]
+ # Called after volumes created
+ self.mock_storage.volumes.get_members.return_value = created_volumes
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
self.node.target_raid_config = target_raid_config
@@ -932,6 +1005,18 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.mock_storage.volumes.create.assert_any_call(
expected_payload2, apply_time=None
)
+ self.assertEqual(
+ [{'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '5',
+ 'size_gb': 100},
+ {'controller': 'RAID controller 1',
+ 'id': '2',
+ 'name': 'Volume 2',
+ 'raid_level': '1',
+ 'size_gb': 500}],
+ task.node.raid_config['logical_disks'])
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -951,17 +1036,27 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_volumes = []
for i in ["1", "2"]:
mock_volumes.append(_mock_volume(
- i, volume_type='Mirrored', raid_type='RAID1'))
+ i, volume_type='Mirrored', raid_type=sushy.RAID_TYPE_RAID1))
op_apply_time_support = mock.MagicMock()
op_apply_time_support.mapped_supported_values = [
sushy.APPLY_TIME_IMMEDIATE, sushy.APPLY_TIME_ON_RESET]
self.mock_storage.volumes.operation_apply_time_support = (
op_apply_time_support)
- self.mock_storage.volumes.get_members.return_value = mock_volumes
+ # 2nd call to mock no volumes after delete
+ self.mock_storage.volumes.get_members.side_effect = [mock_volumes, []]
mock_get_system.return_value.storage.get_members.return_value = [
self.mock_storage]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
+ last_updated = '2022-05-18 08:49:17.585443'
+ task.node.raid_config = {
+ 'logical_disks': [{
+ 'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '1',
+ 'size_gb': 100}],
+ 'last_updated': last_updated}
task.driver.raid.delete_configuration(task)
self.assertEqual(mock_volumes[0].delete.call_count, 1)
self.assertEqual(mock_volumes[1].delete.call_count, 1)
@@ -971,6 +1066,9 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertEqual([], task.node.raid_config['logical_disks'])
+ self.assertNotEqual(
+ last_updated, task.node.raid_config['last_updated'])
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
spec_set=True, autospec=True)
@@ -990,7 +1088,7 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_volumes = []
for i in ["1", "2"]:
mock_volumes.append(_mock_volume(
- i, volume_type='Mirrored', raid_type='RAID1'))
+ i, volume_type='Mirrored', raid_type=sushy.RAID_TYPE_RAID1))
op_apply_time_support = mock.MagicMock()
op_apply_time_support.mapped_supported_values = [
sushy.APPLY_TIME_ON_RESET]
@@ -1005,6 +1103,15 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_volumes[1].delete.return_value = task_mon
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
+ raid_config = {
+ 'logical_disks': [{
+ 'controller': 'RAID controller 1',
+ 'id': '1',
+ 'name': 'Volume 1',
+ 'raid_level': '1',
+ 'size_gb': 100}],
+ 'last_updated': '2022-05-18 08:49:17.585443'}
+ task.node.raid_config = raid_config
task.driver.raid.delete_configuration(task)
self.assertEqual(mock_volumes[0].delete.call_count, 1)
self.assertEqual(mock_volumes[1].delete.call_count, 0)
@@ -1020,6 +1127,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
'pending': True,
'task_monitor_uri': ['/TaskService/123']},
task.node.driver_internal_info.get('raid_configs'))
+ # Async operation, raid_config shouldn't be updated yet
+ self.assertEqual(raid_config, task.node.raid_config)
def test_volume_create_error_handler(self, mock_get_system):
volume_collection = self.mock_storage.volumes
@@ -1251,6 +1360,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_resume_deploy.assert_called_with(task)
mock_resume_clean.assert_not_called()
+ self.assertEqual([], task.node.raid_config['logical_disks'])
+ self.assertIsNotNone(task.node.raid_config.get('last_updated'))
@mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
autospec=True)
@@ -1281,6 +1392,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_resume_deploy.assert_not_called()
mock_resume_clean.assert_called_with(task)
+ self.assertEqual([], task.node.raid_config['logical_disks'])
+ self.assertIsNotNone(task.node.raid_config.get('last_updated'))
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(redfish_utils, 'get_task_monitor',
@@ -1330,6 +1443,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
raid, task, raid_configs['pending'])
mock_submit_delete.assert_not_called()
mock_build_agent_opt.assert_called_with(task.node)
+ # Not yet updated as in progress
+ self.assertEqual({}, task.node.raid_config)
@mock.patch.object(redfish_utils, 'get_task_monitor',
autospec=True)
@@ -1367,3 +1482,5 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
mock_submit_create.assert_not_called()
mock_submit_delete.assert_called_with(raid, task)
mock_build_agent_opt.assert_not_called()
+ # Not yet updated as in progress
+ self.assertEqual({}, task.node.raid_config)
diff --git a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
index 7bcd4e007..ff04c11a0 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
@@ -181,6 +181,9 @@ SUSHY_SPEC = (
'SEVERITY_WARNING',
'SEVERITY_CRITICAL',
'MANAGER_TYPE_BMC',
+ 'RAID_TYPE_RAID1',
+ 'RAID_TYPE_RAID5',
+ 'RAID_TYPE_RAID10',
)
SUSHY_AUTH_SPEC = (
diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py b/ironic/tests/unit/drivers/third_party_driver_mocks.py
index dc7af34b9..cb63cc55b 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mocks.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py
@@ -249,6 +249,9 @@ if not sushy:
SEVERITY_WARNING='warning',
SEVERITY_CRITICAL='critical',
MANAGER_TYPE_BMC='bmc',
+ RAID_TYPE_RAID1='RAID1',
+ RAID_TYPE_RAID5='RAID5',
+ RAID_TYPE_RAID10='RAID10',
)
sys.modules['sushy'] = sushy
diff --git a/releasenotes/notes/fix-redfish-raid-config-9e868c3e069475a1.yaml b/releasenotes/notes/fix-redfish-raid-config-9e868c3e069475a1.yaml
new file mode 100644
index 000000000..8f5c16c9b
--- /dev/null
+++ b/releasenotes/notes/fix-redfish-raid-config-9e868c3e069475a1.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes ``redfish`` and ``idrac-redfish`` RAID ``create_configuration``,
+ ``apply_configuration``, ``delete_configuration`` clean and deploy steps to
+ update node's ``raid_config`` field at the end of the steps.