author    Adam Gandelman <adamg@ubuntu.com>  2014-06-24 15:34:42 -0700
committer Adam Gandelman <adamg@ubuntu.com>  2014-06-30 16:38:29 -0700
commit    b880153f078255eb770ff1bb79335f533b8dcac4 (patch)
tree      71e2065235b24a86ead7be79dd78ce4f5bc65c82 /ironic/nova
parent    af4437eb251d5e52723939ac8f9e7f5cae51d58b (diff)
download  ironic-b880153f078255eb770ff1bb79335f533b8dcac4.tar.gz
Update Nova's available resources at termination
This updates ClusteredComputeManager to override terminate_instance()
with a call to update_available_resource(). The goal is to limit the lag
between when an Ironic node has been freed and when it becomes available
to Nova again. This avoids scheduling errors against Ironic node
inventories with high utilization and/or limited node resources.

Change-Id: I767cb97d9b182f1f9528d078b07a85ffbad313ba
Closes-bug: #1334027
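For context, lockutils.synchronized serializes every call to the decorated
method on a named in-process semaphore, so two instances terminating at the
same time cannot both rebuild the resource tracker state concurrently. Below
is a minimal stdlib sketch of that locking pattern, with illustrative names
rather than Nova's actual implementation:

    import threading

    _semaphores = {}                   # name -> lock, echoing lockutils' registry
    _registry_lock = threading.Lock()

    def synchronized(name):
        """Serialize all calls to the decorated function on the named lock."""
        def wrap(func):
            def inner(*args, **kwargs):
                with _registry_lock:   # protect the registry itself
                    lock = _semaphores.setdefault(name, threading.Lock())
                with lock:             # one caller at a time per name
                    return func(*args, **kwargs)
            return inner
        return wrap

    @synchronized('clustered_compute_manager')
    def update_resources():
        # Stand-in for update_available_resource(admin_context), which
        # reads and rewrites the per-node resource usage records.
        pass

The patch gets the same serialization from the oslo-incubator decorator,
keyed on CCM_SEMAPHORE.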
Diffstat (limited to 'ironic/nova')
-rw-r--r--  ironic/nova/compute/manager.py  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/ironic/nova/compute/manager.py b/ironic/nova/compute/manager.py
index b764e49a7..1e2d04f50 100644
--- a/ironic/nova/compute/manager.py
+++ b/ironic/nova/compute/manager.py
@@ -23,10 +23,14 @@ work. The goal here is to generalise the areas where n-c talking to a clustered
 hypervisor has issues, and long term fold them into the main ComputeManager.
 """
 
+from nova.openstack.common import lockutils
 from nova.compute import manager
 import nova.context
 
+CCM_SEMAPHORE = 'clustered_compute_manager'
+
+
 class ClusteredComputeManager(manager.ComputeManager):
 
     def init_host(self):
@@ -73,3 +77,23 @@ class ClusteredComputeManager(manager.ComputeManager):
             self.update_available_resource(nova.context.get_admin_context())
         except Exception:
             pass
+
+    @lockutils.synchronized(CCM_SEMAPHORE, 'ironic-')
+    def _update_resources(self):
+        """Updates resources while protecting against a race on
+        self._resource_tracker_dict.
+        """
+        self.update_available_resource(nova.context.get_admin_context())
+
+    def terminate_instance(self, context, instance, bdms, reservations):
+        """Terminate an instance on a node.
+
+        We override this method and force a post-termination update to Nova's
+        resources. This avoids having to wait for a Nova periodic task tick
+        before nodes can be reused.
+        """
+        super(ClusteredComputeManager, self).terminate_instance(context,
+                                                                instance,
+                                                                bdms,
+                                                                reservations)
+        self._update_resources()
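Deployment note: this manager only takes effect when Nova is configured to
load it in place of the stock ComputeManager. At the time of this change
that was done through the compute_manager option in nova.conf's [DEFAULT]
section; the exact line below is an assumption based on the then-current
Ironic install docs:

    compute_manager = ironic.nova.compute.manager.ClusteredComputeManager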