summaryrefslogtreecommitdiff
path: root/ironic/tests/unit/conductor/test_task_manager.py
diff options
context:
space:
mode:
authorJim Rollenhagen <jim@jimrollenhagen.com>2016-09-01 13:48:36 +0000
committerJim Rollenhagen <jim@jimrollenhagen.com>2016-09-01 14:10:12 +0000
commit88fc5166785fa420bc93a06a81d5b3900b3fe666 (patch)
tree8bf5d53961b606f702f1c935bc3e60a20c57203d /ironic/tests/unit/conductor/test_task_manager.py
parent739355c091f2817517b6f6dfd8bdb2c45eee0b35 (diff)
downloadironic-88fc5166785fa420bc93a06a81d5b3900b3fe666.tar.gz
Refresh fsm in task when a shared lock is upgraded
If the fsm is advanced while a task is holding a shared lock, and then the task upgrades the lock to an exclusive lock, the fsm object for the task still is in the old state. If the now-exclusive lock attempts to (correctly) advance the state machine, it may explode, thinking it's in the old state and the transition is invalid. Use a getter/setter for self.node in the TaskManager object, so that whenever we set a node object (like we do when upgrading a lock), the fsm will be re-initialized and in sync with reality. The task manager also still has a crutch for nodes in the NOSTATE provision_state, for compatibility with Juno, which was to be removed in Kilo, but nobody did. Instead of moving this crutch into the setter as well, we remove it (and its unit test) here. We also update the unit test nodes to use provision_state AVAILABLE by default, as around 1000 unit tests use the default and begin failing in the task manager if the default is NOSTATE. Last, we remove NOSTATE from states.ALLOWED_DELETE_STATES, as we should never have a node in NOSTATE by now, and deleting the crutch causes the test for this attribute to fail. Change-Id: I0a0277742d512a8ad6e41f25d1c04c13fcf8d6a2 Closes-Bug: #1619232
Diffstat (limited to 'ironic/tests/unit/conductor/test_task_manager.py')
-rw-r--r--ironic/tests/unit/conductor/test_task_manager.py22
1 files changed, 22 insertions, 0 deletions
diff --git a/ironic/tests/unit/conductor/test_task_manager.py b/ironic/tests/unit/conductor/test_task_manager.py
index ca652d298..8246b71a2 100644
--- a/ironic/tests/unit/conductor/test_task_manager.py
+++ b/ironic/tests/unit/conductor/test_task_manager.py
@@ -396,6 +396,28 @@ class TaskManagerTestCase(tests_db_base.DbTestCase):
get_ports_mock.assert_called_once_with(self.context, self.node.id)
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
+ def test_upgrade_lock_refreshes_fsm(self, get_portgroups_mock,
+ get_ports_mock, build_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
+ reserve_mock.return_value = self.node
+ node_get_mock.return_value = self.node
+ with task_manager.acquire(self.context, 'fake-node-id',
+ shared=True) as task1:
+ self.assertEqual(states.AVAILABLE, task1.node.provision_state)
+
+ with task_manager.acquire(self.context, 'fake-node-id',
+ shared=False) as task2:
+ # move the node to manageable
+ task2.process_event('manage')
+ self.assertEqual(states.MANAGEABLE, task1.node.provision_state)
+
+ # now upgrade our shared task and try to go to cleaning
+ # this will explode if task1's FSM doesn't get refreshed
+ task1.upgrade_lock()
+ task1.process_event('provide')
+ self.assertEqual(states.CLEANING, task1.node.provision_state)
+
def test_spawn_after(
self, get_portgroups_mock, get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):