author     Chris Krelle <nobodycam@gmail.com>                2014-03-04 13:20:30 -0800
committer  Devananda van der Veen <devananda.vdv@gmail.com>  2014-03-05 16:30:15 -0800
commit     f616e239c778fb122fdab38b81a3868ccc120338 (patch)
tree       f82c20692320b239575a15195302b8be5d0ee416
parent     9654a4e968fc1469cf2d1f567efa80dee65da5ab (diff)
download   ironic-f616e239c778fb122fdab38b81a3868ccc120338.tar.gz
Import Nova "ironic" driver
Import the Nova "ironic" driver from the Nova review queue. This Nova driver
will not be present in the Icehouse release of Nova, but is required for
Ironic functionality and can be installed as an out-of-tree driver.

Co-Author: Lucas Alvares Gomes <lucasagomes@gmail.com>
Co-Author: Devananda van der Veen <devananda.vdv@gmail.com>
Change-Id: I3351dee1a4b2dfb50317ce85dffe8012f0feca6c
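For reference, a minimal sketch of the nova.conf settings a deployment might use to load this driver out-of-tree. The [ironic] option names come from this change and compute_driver / scheduler_host_manager are standard Nova options; the endpoint URLs and credentials below are placeholder values, not part of this commit:

    [DEFAULT]
    compute_driver = ironic.nova.virt.ironic.driver.IronicDriver
    scheduler_host_manager = ironic.nova.scheduler.ironic_host_manager.IronicHostManager

    [ironic]
    api_endpoint = http://ironic.example.com:6385/v1
    admin_username = ironic
    admin_password = IRONIC_PASSWORD
    admin_tenant_name = service
    admin_url = http://keystone.example.com:35357/v2.0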
-rw-r--r--   ironic/nova/__init__.py                                     0
-rw-r--r--   ironic/nova/scheduler/__init__.py                           0
-rw-r--r--   ironic/nova/scheduler/ironic_host_manager.py               78
-rw-r--r--   ironic/nova/tests/scheduler/ironic_fakes.py                87
-rw-r--r--   ironic/nova/tests/scheduler/test_ironic_host_manager.py  430
-rw-r--r--   ironic/nova/tests/virt/ironic/__init__.py                   0
-rw-r--r--   ironic/nova/tests/virt/ironic/test_driver.py              836
-rw-r--r--   ironic/nova/virt/__init__.py                                0
-rw-r--r--   ironic/nova/virt/ironic/__init__.py                        18
-rw-r--r--   ironic/nova/virt/ironic/driver.py                         605
-rw-r--r--   ironic/nova/virt/ironic/ironic_driver_fields.py            56
-rw-r--r--   ironic/nova/virt/ironic/ironic_states.py                   66
-rwxr-xr-x   tools/config/generate_sample.sh                             2
13 files changed, 2177 insertions, 1 deletions
diff --git a/ironic/nova/__init__.py b/ironic/nova/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/nova/__init__.py
diff --git a/ironic/nova/scheduler/__init__.py b/ironic/nova/scheduler/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/nova/scheduler/__init__.py
diff --git a/ironic/nova/scheduler/ironic_host_manager.py b/ironic/nova/scheduler/ironic_host_manager.py
new file mode 100644
index 000000000..b9bba07ba
--- /dev/null
+++ b/ironic/nova/scheduler/ironic_host_manager.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ironic host manager.
+
+This host manager will consume all of a node's CPUs, disk space, and
+RAM, because it supports bare metal hosts, which cannot be
+subdivided into multiple instances.
+"""
+
+from nova.openstack.common import log as logging
+from nova.scheduler import host_manager
+
+LOG = logging.getLogger(__name__)
+
+
+class IronicNodeState(host_manager.HostState):
+ """Mutable and immutable information tracked for a host.
+ This is an attempt to remove the ad-hoc data structures
+ previously used and lock down access.
+ """
+
+ def update_from_compute_node(self, compute):
+ """Update information about a host from its compute_node info."""
+ all_ram_mb = compute['memory_mb']
+
+ free_disk_mb = compute['free_disk_gb'] * 1024
+ free_ram_mb = compute['free_ram_mb']
+
+ self.free_ram_mb = free_ram_mb
+ self.total_usable_ram_mb = all_ram_mb
+ self.free_disk_mb = free_disk_mb
+ self.vcpus_total = compute['vcpus']
+ self.vcpus_used = compute['vcpus_used']
+
+ def consume_from_instance(self, instance):
+ """Consume nodes entire resources regardless of instance request."""
+ self.free_ram_mb = 0
+ self.free_disk_mb = 0
+ self.vcpus_used = self.vcpus_total
+
+
+def new_host_state(self, host, node, capabilities=None, service=None):
+ """Returns an instance of IronicNodeState or HostState according to
+ capabilities. If 'ironic_driver' is in capabilities, it returns an
+    instance of IronicNodeState. If not, returns an instance of HostState.
+ """
+ if capabilities is None:
+ capabilities = {}
+ cap = capabilities.get('compute', {})
+ if bool(cap.get('ironic_driver')):
+ return IronicNodeState(host, node, capabilities, service)
+ else:
+ return host_manager.HostState(host, node, capabilities, service)
+
+
+class IronicHostManager(host_manager.HostManager):
+ """Ironic HostManager class."""
+
+ # Override.
+ # Yes, this is not a class, and it is OK
+ host_state_cls = new_host_state
+
+ def __init__(self):
+ super(IronicHostManager, self).__init__()
diff --git a/ironic/nova/tests/scheduler/ironic_fakes.py b/ironic/nova/tests/scheduler/ironic_fakes.py
new file mode 100644
index 000000000..ddbb8a736
--- /dev/null
+++ b/ironic/nova/tests/scheduler/ironic_fakes.py
@@ -0,0 +1,87 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake nodes for Ironic host manager tests.
+"""
+
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=10, free_ram_mb=1024),
+ dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=20, free_ram_mb=2048),
+ dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=30, free_ram_mb=3072),
+ dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=40, free_ram_mb=4096),
+ # Broken entry
+ dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
+ cpu_info='baremetal cpu',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=50, free_ram_mb=5120),
+]
+
+
+IRONIC_SERVICE_STATE = {
+ ('host1', 'node1uuid'): {'compute': {'ironic_driver':
+ "nova.virt.ironic.driver.IronicDriver"}},
+ ('host2', 'node2uuid'): {'compute': {'ironic_driver':
+ "nova.virt.ironic.driver.IronicDriver"}},
+ ('host3', 'node3uuid'): {'compute': {'ironic_driver':
+ "nova.virt.ironic.driver.IronicDriver"}},
+ ('host4', 'node4uuid'): {'compute': {'ironic_driver':
+ "nova.virt.ironic.driver.IronicDriver"}},
+ ('host5', 'node5uuid'): {'compute': {'ironic_driver':
+ "nova.virt.ironic.driver.IronicDriver"}},
+}
diff --git a/ironic/nova/tests/scheduler/test_ironic_host_manager.py b/ironic/nova/tests/scheduler/test_ironic_host_manager.py
new file mode 100644
index 000000000..e25e58820
--- /dev/null
+++ b/ironic/nova/tests/scheduler/test_ironic_host_manager.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2014 OpenStack Foundation
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For IronicHostManager
+"""
+
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova.scheduler import filters
+from nova.scheduler import ironic_host_manager
+from nova import test
+from nova.tests.scheduler import ironic_fakes
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class IronicHostManagerTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [ironic_host_manager.IronicNodeState(
+ 'fake_host%s' % x, 'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [ironic_host_manager.IronicNodeState(
+ 'fake_multihost', 'fake-node%s' % x) for x in xrange(1, 5)]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+        # Ensure .service is set and we have the values we expect.
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ # Check that .service is set properly
+ for i in xrange(4):
+ compute_node = ironic_fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(compute_node['service'],
+ host_states_map[state_key].service)
+ # check we have the values we think we should.
+ self.assertEqual(1024,
+ host_states_map[('host1', 'node1uuid')].free_ram_mb)
+ self.assertEqual(10240,
+ host_states_map[('host1', 'node1uuid')].free_disk_mb)
+ self.assertEqual(2048,
+ host_states_map[('host2', 'node2uuid')].free_ram_mb)
+ self.assertEqual(20480,
+ host_states_map[('host2', 'node2uuid')].free_disk_mb)
+ self.assertEqual(3072,
+ host_states_map[('host3', 'node3uuid')].free_ram_mb)
+ self.assertEqual(30720,
+ host_states_map[('host3', 'node3uuid')].free_disk_mb)
+ self.assertEqual(4096,
+ host_states_map[('host4', 'node4uuid')].free_ram_mb)
+ self.assertEqual(40960,
+ host_states_map[('host4', 'node4uuid')].free_disk_mb)
+
+
+class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [
+ ironic_host_manager.IronicNodeState('host1', 'node1uuid'),
+ ironic_host_manager.IronicNodeState('host2', 'node2uuid'),
+ ironic_host_manager.IronicNodeState('host3', 'node3uuid'),
+ ironic_host_manager.IronicNodeState('host4', 'node4uuid')
+ ]
+ self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ stats=dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386'),
+ supported_instances=
+ '[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=10, free_ram_mb=1024)
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(4, len(host_states_map))
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4uuid']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(3, len(host_states_map))
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(0, len(host_states_map))
+
+ def test_update_from_compute_node(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ self.assertEqual(1024, host.free_ram_mb)
+ self.assertEqual(1024, host.total_usable_ram_mb)
+ self.assertEqual(10240, host.free_disk_mb)
+ self.assertEqual(1, host.vcpus_total)
+ self.assertEqual(0, host.vcpus_used)
+
+ def test_consume_identical_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_larger_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_smaller_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+
+class IronicHostManagerTestFilters(test.NoDBTestCase):
+ """Test filters work for IronicHostManager."""
+
+ def setUp(self):
+ super(IronicHostManagerTestFilters, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [ironic_host_manager.IronicNodeState(
+ 'fake_host%s' % x, 'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [ironic_host_manager.IronicNodeState(
+ 'fake_multihost', 'fake-node%s' % x) for x in xrange(1, 5)]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+        # Test that we return the single matching filter class
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(1, len(filter_classes))
+ self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+ # Ensure ignore_hosts processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+ # Ensure all nodes returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+ # Ensure only overlapping results if both force host and node
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+ # Ensure non-overlapping force_node and force_host yield no result
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
diff --git a/ironic/nova/tests/virt/ironic/__init__.py b/ironic/nova/tests/virt/ironic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/nova/tests/virt/ironic/__init__.py
diff --git a/ironic/nova/tests/virt/ironic/test_driver.py b/ironic/nova/tests/virt/ironic/test_driver.py
new file mode 100644
index 000000000..c337c7474
--- /dev/null
+++ b/ironic/nova/tests/virt/ironic/test_driver.py
@@ -0,0 +1,836 @@
+# coding=utf-8
+#
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the ironic driver."""
+
+from ironicclient import client as ironic_client
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+
+from nova.compute import power_state as nova_states
+from nova import context as nova_context
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests import fake_instance
+from nova.tests import utils
+from nova.virt import fake
+from nova.virt.ironic import driver as ironic_driver
+from nova.virt.ironic import ironic_states
+
+
+CONF = cfg.CONF
+
+IRONIC_FLAGS = dict(
+ instance_type_extra_specs=['test_spec:test_value'],
+ api_version=1,
+ group='ironic',
+)
+
+
+def get_test_validation(**kw):
+ return type('interfaces', (object,),
+ {'power': kw.get('power', True),
+ 'deploy': kw.get('deploy', True),
+ 'console': kw.get('console', True),
+ 'rescue': kw.get('rescue', True)})()
+
+
+def get_test_node(**kw):
+ return type('node', (object,),
+ {'uuid': kw.get('uuid', 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'),
+ 'chassis_uuid': kw.get('chassis_uuid'),
+ 'power_state': kw.get('power_state',
+ ironic_states.NOSTATE),
+ 'target_power_state': kw.get('target_power_state',
+ ironic_states.NOSTATE),
+ 'provision_state': kw.get('provision_state',
+ ironic_states.NOSTATE),
+ 'target_provision_state': kw.get('target_provision_state',
+ ironic_states.NOSTATE),
+ 'last_error': kw.get('last_error'),
+ 'instance_uuid': kw.get('instance_uuid'),
+ 'driver': kw.get('driver', 'fake'),
+ 'driver_info': kw.get('driver_info', {}),
+ 'properties': kw.get('properties', {}),
+ 'reservation': kw.get('reservation'),
+ 'maintenance': kw.get('maintenance', False),
+ 'extra': kw.get('extra', {}),
+             'updated_at': kw.get('updated_at'),
+             'created_at': kw.get('created_at')})()
+
+
+def get_test_port(**kw):
+ return type('port', (object,),
+ {'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'),
+ 'node_uuid': kw.get('node_uuid', get_test_node().uuid),
+ 'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'),
+ 'extra': kw.get('extra', {}),
+ 'created_at': kw.get('created_at'),
+ 'updated_at': kw.get('updated_at')})()
+
+
+class FakePortClient(object):
+
+ def get(self, port_uuid):
+ pass
+
+ def update(self, port_uuid, patch):
+ pass
+
+
+class FakeNodeClient(object):
+
+ def list(self):
+ return []
+
+ def get(self, node_uuid):
+ pass
+
+ def get_by_instance_uuid(self, instance_uuid):
+ pass
+
+ def list_ports(self, node_uuid):
+ pass
+
+ def set_power_state(self, node_uuid, target):
+ pass
+
+ def set_provision_state(self, node_uuid, target):
+ pass
+
+ def update(self, node_uuid, patch):
+ pass
+
+ def validate(self, node_uuid):
+ pass
+
+
+class FakeClient(object):
+ node = FakeNodeClient()
+ port = FakePortClient()
+
+
+FAKE_CLIENT = FakeClient()
+
+
+class IronicDriverTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverTestCase, self).setUp()
+ self.flags(**IRONIC_FLAGS)
+ self.driver = ironic_driver.IronicDriver(None)
+ self.driver.virtapi = fake.FakeVirtAPI()
+ self.ctx = nova_context.get_admin_context()
+ # mock _get_client
+ self.mock_cli_patcher = mock.patch.object(self.driver, '_get_client')
+ self.mock_cli = self.mock_cli_patcher.start()
+ self.mock_cli.return_value = FAKE_CLIENT
+
+ def stop_patchers():
+ if self.mock_cli:
+ self.mock_cli_patcher.stop()
+
+ self.addCleanup(stop_patchers)
+
+ def test_validate_driver_loading(self):
+ self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
+
+ def test_get_hypervisor_type(self):
+ self.assertEqual(self.driver.get_hypervisor_type(), 'ironic')
+
+ def test_get_hypervisor_version(self):
+ self.assertEqual(self.driver.get_hypervisor_version(), 1)
+
+ def test__require_node(self):
+ node_uuid = '1b67c895-9ef8-42ac-a59c-7bf84fe24e82'
+ test_instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ self.assertEqual(self.driver._require_node(test_instance), node_uuid)
+
+ def test__get_client_no_context(self):
+ # stop _get_client mock
+ self.mock_cli_patcher.stop()
+ self.mock_cli = None
+
+ self.ctx.auth_token = None
+ with mock.patch.object(nova_context, 'get_admin_context') as mock_ctx:
+ mock_ctx.return_value = self.ctx
+ with mock.patch.object(ironic_client, 'get_client') as mock_ir_cli:
+ self.driver._get_client()
+ expected = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public'}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ def test__get_client_with_context(self):
+ # stop _get_client mock
+ self.mock_cli_patcher.stop()
+ self.mock_cli = None
+
+ self.ctx.auth_token = 'fake-token'
+ with mock.patch.object(nova_context, 'get_admin_context') as mock_ctx:
+ mock_ctx.return_value = self.ctx
+ with mock.patch.object(ironic_client, 'get_client') as mock_ir_cli:
+ self.driver._get_client()
+ expected = {'os_auth_token': self.ctx.auth_token,
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ def test__require_node(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ test_instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ self.assertEqual(node_uuid, self.driver._require_node(test_instance))
+
+ def test__require_node_fail(self):
+ test_instance = fake_instance.fake_instance_obj(self.ctx, node=None)
+ self.assertRaises(exception.NovaException,
+ self.driver._require_node, test_instance)
+
+ def test__node_resource(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ cpus = 2
+ mem = 512
+ disk = 10
+ arch = 'x86_64'
+ properties = {'cpus': cpus, 'memory_mb': mem,
+ 'local_gb': disk, 'cpu_arch': arch}
+ node = get_test_node(uuid=node_uuid,
+ instance_uuid=uuidutils.generate_uuid(),
+ properties=properties)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(cpus, result['vcpus'])
+ self.assertEqual(cpus, result['vcpus_used'])
+ self.assertEqual(mem, result['memory_mb'])
+ self.assertEqual(mem, result['memory_mb_used'])
+ self.assertEqual(disk, result['local_gb'])
+ self.assertEqual(disk, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual('{"cpu_arch": "x86_64", "ironic_driver": "'
+ 'nova.virt.ironic.driver.IronicDriver", '
+ '"test_spec": "test_value"}',
+ result['stats'])
+
+ def test__node_resource_no_instance_uuid(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ cpus = 2
+ mem = 512
+ disk = 10
+ arch = 'x86_64'
+ properties = {'cpus': cpus, 'memory_mb': mem,
+ 'local_gb': disk, 'cpu_arch': arch}
+ node = get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ properties=properties)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(cpus, result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(mem, result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(disk, result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual('{"cpu_arch": "x86_64", "ironic_driver": "'
+ 'nova.virt.ironic.driver.IronicDriver", '
+ '"test_spec": "test_value"}',
+ result['stats'])
+
+ def test__retry_on_conflict(self):
+ test_list = []
+
+ def test_func(test_list):
+ test_list.append(1)
+
+ self.driver._retry_on_conflict(test_func, test_list)
+ self.assertIn(1, test_list)
+
+ def test__retry_on_conflict_fail(self):
+ CONF.set_default('api_max_retries', default=1, group='ironic')
+ CONF.set_default('api_retry_interval', default=0, group='ironic')
+
+ def test_func():
+ raise ironic_exception.HTTPConflict()
+
+ self.assertRaises(ironic_driver.MaximumRetriesReached,
+ self.driver._retry_on_conflict, test_func)
+
+ def test__start_firewall(self):
+ func_list = ['setup_basic_filtering',
+ 'prepare_instance_filter',
+ 'apply_instance_filter']
+ patch_list = [mock.patch.object(self.driver.firewall_driver, func)
+ for func in func_list]
+ mock_list = [patcher.start() for patcher in patch_list]
+ for p in patch_list:
+ self.addCleanup(p.stop)
+
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._start_firewall(fake_inst, fake_net_info)
+
+ # assert all methods were invoked with the right args
+ for m in mock_list:
+ m.assert_called_once_with(fake_inst, fake_net_info)
+
+ def test__stop_firewall(self):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ with mock.patch.object(self.driver.firewall_driver,
+ 'unfilter_instance') as mock_ui:
+ self.driver._stop_firewall(fake_inst, fake_net_info)
+ mock_ui.assert_called_once_with(fake_inst, fake_net_info)
+
+ def test_list_instances(self):
+ num_nodes = 2
+ nodes = []
+ for n in range(num_nodes):
+ nodes.append(get_test_node(
+ instance_uuid=uuidutils.generate_uuid()))
+ # append a node w/o instance_uuid which shouldn't be listed
+ nodes.append(get_test_node(instance_uuid=None))
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list') as mock_list:
+ mock_list.return_value = nodes
+
+ expected = [n for n in nodes if n.instance_uuid]
+ instances = self.driver.list_instances()
+ self.assertEqual(sorted(expected), sorted(instances))
+ self.assertEqual(num_nodes, len(instances))
+
+ def test_get_available_nodes(self):
+ num_nodes = 2
+ nodes = []
+ for n in range(num_nodes):
+ nodes.append(get_test_node(uuid=uuidutils.generate_uuid(),
+ power_state=ironic_states.POWER_OFF))
+ # append a node w/o power_state which shouldn't be listed
+ nodes.append(get_test_node(power_state=None))
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list') as mock_list:
+ mock_list.return_value = nodes
+
+ expected = [n.uuid for n in nodes if n.power_state]
+ available_nodes = self.driver.get_available_nodes()
+ self.assertEqual(sorted(expected), sorted(available_nodes))
+ self.assertEqual(num_nodes, len(available_nodes))
+
+ def test_get_available_resource(self):
+ node = get_test_node()
+ fake_resource = 'fake-resource'
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+
+ with mock.patch.object(self.driver, '_node_resource') as mock_nr:
+ mock_nr.return_value = fake_resource
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ mock_nr.assert_called_once_with(node)
+
+ def test_get_info(self):
+ instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ properties = {'memory_mb': 512, 'cpus': 2}
+ power_state = ironic_states.POWER_ON
+ node = get_test_node(instance_uuid=instance_uuid,
+ properties=properties,
+ power_state=power_state)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') \
+ as mock_gbiu:
+ mock_gbiu.return_value = node
+
+            # ironic_states.POWER_ON should be mapped to
+ # nova_states.RUNNING
+ expected = {'state': nova_states.RUNNING,
+ 'max_mem': properties['memory_mb'],
+ 'mem': properties['memory_mb'],
+ 'num_cpu': properties['cpus'],
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj('fake-context',
+ uuid=instance_uuid)
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ def test_get_info_http_not_found(self):
+ with mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') \
+ as mock_gbiu:
+ mock_gbiu.side_effect = ironic_exception.HTTPNotFound()
+
+ expected = {'state': nova_states.NOSTATE,
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 0,
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=uuidutils.generate_uuid())
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ def test_macs_for_instance(self):
+ node = get_test_node()
+ port = get_test_port()
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list_ports') as mock_lp:
+ mock_lp.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ result = self.driver.macs_for_instance(instance)
+ self.assertEqual([port.address], result)
+ mock_lp.assert_called_once_with(node.uuid)
+
+ def test_macs_for_instance_http_not_found(self):
+ with mock.patch.object(FAKE_CLIENT.node, 'get') as mock_get:
+ mock_get.side_effect = ironic_exception.HTTPNotFound()
+
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=uuidutils.generate_uuid())
+ result = self.driver.macs_for_instance(instance)
+ self.assertEqual([], result)
+
+ def test_spawn(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = 'fake-flavor'
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_fg = mock.patch.object(self.driver.virtapi, 'flavor_get').start()
+ mock_fg.return_value = fake_flavor
+ self.addCleanup(mock_fg.stop)
+ mock_validate = mock.patch.object(FAKE_CLIENT.node, 'validate').start()
+ mock_validate.return_value = get_test_validation()
+ self.addCleanup(mock_validate.stop)
+
+ mock_adf = mock.patch.object(self.driver, '_add_driver_fields').start()
+ self.addCleanup(mock_adf.stop)
+ mock_pvifs = mock.patch.object(self.driver, 'plug_vifs').start()
+ self.addCleanup(mock_pvifs.stop)
+ mock_sf = mock.patch.object(self.driver, '_start_firewall').start()
+        self.addCleanup(mock_sf.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') \
+ as mock_sps:
+ self.driver.spawn(self.ctx, instance, None, [], None)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock_validate.assert_called_once_with(node_uuid)
+ mock_fg.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
+ mock_pvifs.assert_called_once_with(instance, None)
+ mock_sf.assert_called_once_with(instance, None)
+ mock_sps.assert_called_once_with(node_uuid, 'active')
+
+ def test_spawn_setting_instance_uuid_fail(self):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=uuidutils.generate_uuid())
+ with mock.patch.object(FAKE_CLIENT.node, 'update') as mock_update:
+ mock_update.side_effect = ironic_exception.HTTPBadRequest()
+ self.assertRaises(exception.NovaException, self.driver.spawn,
+ self.ctx, instance, None, [], None)
+
+ def test_spawn_node_driver_validation_fail(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = 'fake-flavor'
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_fg = mock.patch.object(self.driver.virtapi, 'flavor_get').start()
+ mock_fg.return_value = fake_flavor
+ self.addCleanup(mock_fg.stop)
+
+ mock_adf = mock.patch.object(self.driver, '_add_driver_fields').start()
+ self.addCleanup(mock_adf.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'validate') as mock_validate:
+ mock_validate.return_value = get_test_validation(power=False,
+ deploy=False)
+ self.assertRaises(exception.ValidationError, self.driver.spawn,
+ self.ctx, instance, None, [], None)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock_validate.assert_called_once_with(node_uuid)
+ mock_fg.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
+
+ def test_spawn_node_prepare_for_deploy_fail(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_validate = mock.patch.object(FAKE_CLIENT.node, 'validate').start()
+ mock_validate.return_value = get_test_validation()
+ self.addCleanup(mock_validate.stop)
+
+ mock_fg = mock.patch.object(self.driver.virtapi, 'flavor_get').start()
+ self.addCleanup(mock_fg.stop)
+ mock_pvifs = mock.patch.object(self.driver, 'plug_vifs').start()
+ self.addCleanup(mock_pvifs.stop)
+ mock_upvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_upvifs.stop)
+ mock_stof = mock.patch.object(self.driver, '_stop_firewall').start()
+ self.addCleanup(mock_stof.stop)
+
+ class TestException(Exception):
+ pass
+
+ with mock.patch.object(self.driver, '_start_firewall') as mock_sf:
+ mock_sf.side_effect = TestException()
+ self.assertRaises(TestException, self.driver.spawn,
+ self.ctx, instance, None, [], None)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock_validate.assert_called_once_with(node_uuid)
+ mock_fg.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_upvifs.assert_called_once_with(instance, None)
+ mock_stof.assert_called_once_with(instance, None)
+
+ def test_spawn_node_trigger_deploy_fail(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_validate = mock.patch.object(FAKE_CLIENT.node, 'validate').start()
+ mock_validate.return_value = get_test_validation()
+ self.addCleanup(mock_validate.stop)
+
+ mock_fg = mock.patch.object(self.driver.virtapi, 'flavor_get').start()
+ self.addCleanup(mock_fg.stop)
+ mock_pvifs = mock.patch.object(self.driver, 'plug_vifs').start()
+ self.addCleanup(mock_pvifs.stop)
+ mock_sf = mock.patch.object(self.driver, '_start_firewall').start()
+ self.addCleanup(mock_sf.stop)
+ mock_upvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_upvifs.stop)
+ mock_stof = mock.patch.object(self.driver, '_stop_firewall').start()
+ self.addCleanup(mock_stof.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') \
+ as mock_sps:
+ mock_sps.side_effect = ironic_driver.MaximumRetriesReached
+ self.assertRaises(exception.NovaException, self.driver.spawn,
+ self.ctx, instance, None, [], None)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock_validate.assert_called_once_with(node_uuid)
+ mock_fg.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_upvifs.assert_called_once_with(instance, None)
+ mock_stof.assert_called_once_with(instance, None)
+
+ def test_destroy(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_sps = mock.patch.object(FAKE_CLIENT.node,
+ 'set_provision_state').start()
+ self.addCleanup(mock_sps.stop)
+ mock_update = mock.patch.object(FAKE_CLIENT.node, 'update').start()
+ self.addCleanup(mock_update.stop)
+ mock_upvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_upvifs.stop)
+ mock_stof = mock.patch.object(self.driver, '_stop_firewall').start()
+ self.addCleanup(mock_stof.stop)
+
+ self.driver.destroy(self.ctx, instance, None, None)
+ mock_sps.assert_called_once_with(node_uuid, 'deleted')
+ mock_get.assert_called_with(node_uuid)
+ mock_upvifs.assert_called_once_with(instance, None)
+ mock_stof.assert_called_once_with(instance, None)
+
+ def test_destroy_trigger_undeploy_fail(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') \
+ as mock_sps:
+ mock_sps.side_effect = ironic_driver.MaximumRetriesReached
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+
+ def test_destroy_unprovision_fail(self):
+ CONF.set_default('api_max_retries', default=1, group='ironic')
+ CONF.set_default('api_retry_interval', default=0, group='ironic')
+
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid,
+ provision_state='fake-state')
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_sps = mock.patch.object(FAKE_CLIENT.node,
+ 'set_provision_state').start()
+ self.addCleanup(mock_sps.stop)
+
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_sps.assert_called_once_with(node_uuid, 'deleted')
+
+ def test_destroy_unassociate_fail(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_sps = mock.patch.object(FAKE_CLIENT.node,
+ 'set_provision_state').start()
+ self.addCleanup(mock_sps.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'update') as mock_update:
+ mock_update.side_effect = ironic_driver.MaximumRetriesReached()
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_sps.assert_called_once_with(node_uuid, 'deleted')
+ mock_get.assert_called_with(node_uuid)
+
+ def test_reboot(self):
+ #TODO(lucasagomes): Not implemented in the driver.py
+ pass
+
+ def test_power_off(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ with mock.patch.object(FAKE_CLIENT.node, 'set_power_state') as mock_sp:
+ self.driver.power_off(instance)
+ mock_sp.assert_called_once_with(node_uuid, 'off')
+
+ def test_power_on(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ with mock.patch.object(FAKE_CLIENT.node, 'set_power_state') as mock_sp:
+ self.driver.power_on(self.ctx, instance,
+ utils.get_test_network_info())
+ mock_sp.assert_called_once_with(node_uuid, 'on')
+
+ def test_get_host_stats(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ cpu_arch = 'x86_64'
+ node = get_test_node(uuid=node_uuid,
+ properties={'cpu_arch': cpu_arch})
+ supported_instances = 'fake-supported-instances'
+ resource = {'supported_instances': supported_instances,
+ 'hypervisor_hostname': uuidutils.generate_uuid(),
+ 'cpu_info': 'baremetal cpu',
+ 'hypervisor_version': 1,
+ 'local_gb': 10,
+ 'memory_mb_used': 512,
+ 'stats': {'cpu_arch': 'x86_64',
+ 'ironic_driver':
+ 'nova.virt.ironic.driver.IronicDriver',
+ 'test_spec': 'test_value'},
+ 'vcpus_used': 2,
+ 'hypervisor_type': 'ironic',
+ 'local_gb_used': 10,
+ 'memory_mb': 512,
+ 'vcpus': 2}
+
+ # Reset driver specs
+ test_extra_spec = 'test-spec'
+ self.driver.extra_specs = {test_extra_spec: test_extra_spec}
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list') as mock_list:
+ mock_list.return_value = [node]
+ with mock.patch.object(self.driver, '_node_resource') as mock_nr:
+ mock_nr.return_value = resource
+ with mock.patch.object(ironic_driver,
+ '_get_nodes_supported_instances') as mock_gnsi:
+ mock_gnsi.return_value = supported_instances
+
+ expected = {'vcpus': resource['vcpus'],
+ 'vcpus_used': resource['vcpus_used'],
+ 'cpu_info': resource['cpu_info'],
+ 'disk_total': resource['local_gb'],
+ 'disk_used': resource['local_gb'],
+ 'disk_available': 0,
+ 'host_memory_total': resource['memory_mb'],
+ 'host_memory_free': 0,
+ 'hypervisor_type': resource['hypervisor_type'],
+ 'hypervisor_version': resource['hypervisor_version'],
+ 'supported_instances': supported_instances,
+ 'host': CONF.host,
+ 'hypervisor_hostname': node_uuid,
+ 'node': node_uuid,
+ 'cpu_arch': cpu_arch,
+ test_extra_spec: test_extra_spec}
+
+ result = self.driver.get_host_stats()
+ self.assertEqual([expected], result)
+
+ def test_plug_vifs(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(uuid=node_uuid)
+ port = get_test_port()
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_uvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_uvifs.stop)
+ mock_update = mock.patch.object(FAKE_CLIENT.port, 'update').start()
+ self.addCleanup(mock_update.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list_ports') as mock_lp:
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = utils.get_test_network_info()
+
+ port_id = unicode(network_info[0]['id'])
+ expected_patch = [{'op': 'add',
+ 'path': '/extra/vif_port_id',
+ 'value': port_id}]
+ self.driver.plug_vifs(instance, network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(instance, network_info)
+ mock_get.assert_called_once_with(node_uuid)
+ mock_lp.assert_called_once_with(node_uuid)
+ mock_update.assert_called_once_with(port.uuid, expected_patch)
+
+ def test_plug_vifs_count_missmatch(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(uuid=node_uuid)
+ port = get_test_port()
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_uvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_uvifs.stop)
+ mock_update = mock.patch.object(FAKE_CLIENT.port, 'update').start()
+ self.addCleanup(mock_update.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list_ports') as mock_lp:
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ # len(network_info) > len(ports)
+ network_info = (utils.get_test_network_info() +
+ utils.get_test_network_info())
+ self.assertRaises(exception.NovaException,
+ self.driver.plug_vifs, instance,
+ network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(instance, network_info)
+ mock_get.assert_called_once_with(node_uuid)
+ mock_lp.assert_called_once_with(node_uuid)
+ # assert port.update() was not called
+ assert not mock_update.called
+
+ def test_plug_vifs_no_network_info(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(uuid=node_uuid)
+ port = get_test_port()
+
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+ mock_uvifs = mock.patch.object(self.driver, 'unplug_vifs').start()
+ self.addCleanup(mock_uvifs.stop)
+ mock_update = mock.patch.object(FAKE_CLIENT.port, 'update').start()
+ self.addCleanup(mock_update.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list_ports') as mock_lp:
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = []
+ self.driver.plug_vifs(instance, network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(instance, network_info)
+ mock_get.assert_called_once_with(node_uuid)
+ mock_lp.assert_called_once_with(node_uuid)
+ # assert port.update() was not called
+ assert not mock_update.called
+
+ def test_unplug_vifs(self):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = get_test_node(uuid=node_uuid)
+ port = get_test_port()
+
+ mock_update = mock.patch.object(FAKE_CLIENT.port, 'update').start()
+ self.addCleanup(mock_update.stop)
+ mock_get = mock.patch.object(FAKE_CLIENT.node, 'get').start()
+ mock_get.return_value = node
+ self.addCleanup(mock_get.stop)
+
+ with mock.patch.object(FAKE_CLIENT.node, 'list_ports') as mock_lp:
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ expected_patch = [{'op': 'remove', 'path':
+ '/extra/vif_port_id'}]
+ self.driver.unplug_vifs(instance,
+ utils.get_test_network_info())
+
+ # asserts
+ mock_get.assert_called_once_with(node_uuid)
+ mock_lp.assert_called_once_with(node_uuid)
+ mock_update.assert_called_once_with(port.uuid, expected_patch)
+
+ def test_unplug_vifs_no_network_info(self):
+ mock_update = mock.patch.object(FAKE_CLIENT.port, 'update').start()
+ self.addCleanup(mock_update.stop)
+
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = []
+ self.driver.unplug_vifs(instance, network_info)
+
+ # assert port.update() was not called
+ assert not mock_update.called
diff --git a/ironic/nova/virt/__init__.py b/ironic/nova/virt/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/nova/virt/__init__.py
diff --git a/ironic/nova/virt/ironic/__init__.py b/ironic/nova/virt/ironic/__init__.py
new file mode 100644
index 000000000..df10d16b9
--- /dev/null
+++ b/ironic/nova/virt/ironic/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic.nova.virt.ironic import driver
+
+IronicDriver = driver.IronicDriver
diff --git a/ironic/nova/virt/ironic/driver.py b/ironic/nova/virt/ironic/driver.py
new file mode 100644
index 000000000..c22738c3d
--- /dev/null
+++ b/ironic/nova/virt/ironic/driver.py
@@ -0,0 +1,605 @@
+# coding=utf-8
+#
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A driver wrapping the Ironic API, such that Nova may provision
+bare metal resources.
+"""
+
+from ironicclient import client as ironic_client
+from ironicclient import exc as ironic_exception
+from oslo.config import cfg
+
+from nova.compute import power_state
+from nova import context as nova_context
+from nova import exception
+from nova.openstack.common import excutils
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import loopingcall
+from nova.virt import driver as virt_driver
+from nova.virt import firewall
+from ironic.nova.virt.ironic import ironic_states
+
+LOG = logging.getLogger(__name__)
+
+opts = [
+ cfg.IntOpt('api_version',
+ default=1,
+ help='Version of Ironic API service endpoint.'),
+ cfg.StrOpt('api_endpoint',
+ help='URL for Ironic API endpoint.'),
+ cfg.StrOpt('admin_username',
+ help='Ironic keystone admin name.'),
+ cfg.StrOpt('admin_password',
+ help='Ironic keystone admin password.'),
+ cfg.StrOpt('admin_url',
+ help='Ironic public API endpoint.'),
+ cfg.StrOpt('pxe_bootfile_name',
+ help='This gets passed to Neutron as the bootfile dhcp '
+ 'parameter when dhcp_options_enabled is set.',
+ default='pxelinux.0'),
+ cfg.StrOpt('admin_tenant_name',
+ help='Ironic keystone tenant name.'),
+ cfg.ListOpt('instance_type_extra_specs',
+ default=[],
+ help='A list of additional capabilities corresponding to '
+ 'instance_type_extra_specs for this compute '
+ 'host to advertise. Valid entries are name:value pairs, '
+ 'for example, "key1:val1, key2:val2".'),
+ cfg.IntOpt('api_max_retries',
+ default=5,
+ help=('How many times to retry when a request conflicts.')),
+ cfg.IntOpt('api_retry_interval',
+ default=2,
+ help=('How long to wait, in seconds, between retries '
+ 'when a request conflicts.')),
+ ]
+
+ironic_group = cfg.OptGroup(name='ironic',
+ title='Ironic Options')
+
+CONF = cfg.CONF
+CONF.register_group(ironic_group)
+CONF.register_opts(opts, ironic_group)
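+
+# A sketch (illustrative values only, not defaults) of the matching [ironic]
+# section an operator might put in nova.conf for the options above:
+#
+#     [ironic]
+#     api_endpoint = http://ironic.example.com:6385/v1
+#     admin_username = ironic
+#     admin_password = secret
+#     admin_url = http://keystone.example.com:35357/v2.0
+#     admin_tenant_name = service
+#     api_max_retries = 5
+#     api_retry_interval = 2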
+
+_FIREWALL_DRIVER = "%s.%s" % (firewall.__name__,
+ firewall.NoopFirewallDriver.__name__)
+
+_POWER_STATE_MAP = {
+ ironic_states.POWER_ON: power_state.RUNNING,
+ ironic_states.NOSTATE: power_state.NOSTATE,
+ ironic_states.POWER_OFF: power_state.SHUTDOWN,
+}
+
+
+class MaximumRetriesReached(exception.NovaException):
+ msg_fmt = _("Maximum number of retries reached.")
+
+
+def map_power_state(state):
+ try:
+ return _POWER_STATE_MAP[state]
+ except KeyError:
+ LOG.warning(_("Power state %s not found.") % state)
+ return power_state.NOSTATE
+
+
+def _get_required_value(key, value):
+ """Return the requested value."""
+ if '/' in value:
+ # we need to split the value
+ split_value = value.split('/')
+ eval_string = 'key'
+ for value in split_value:
+ eval_string = "%s['%s']" % (eval_string, value)
+ return eval(eval_string)
+ else:
+ return key[value]
+
+
+def _get_nodes_supported_instances(cpu_arch=''):
+ """Return supported instances for a node."""
+ return [(cpu_arch, 'baremetal', 'baremetal')]
+
+
+class IronicDriver(virt_driver.ComputeDriver):
+ """Hypervisor driver for Ironic - bare metal provisioning."""
+
+ capabilities = {"has_imagecache": False}
+
+ def __init__(self, virtapi, read_only=False):
+ super(IronicDriver, self).__init__(virtapi)
+
+ self.firewall_driver = firewall.load_driver(default=_FIREWALL_DRIVER)
+ # TODO(deva): sort out extra_specs and nova-scheduler interaction
+ extra_specs = {}
+ extra_specs["ironic_driver"] = "ironic.nova.virt.ironic.driver.IronicDriver"
+ # cpu_arch set per node.
+ extra_specs['cpu_arch'] = ''
+ for pair in CONF.ironic.instance_type_extra_specs:
+ keyval = pair.split(':', 1)
+ keyval[0] = keyval[0].strip()
+ keyval[1] = keyval[1].strip()
+ extra_specs[keyval[0]] = keyval[1]
+
+ self.extra_specs = extra_specs
+
+ def _retry_on_conflict(self, func, *args):
+ """Rety the request if the API returns 409 (Conflict)."""
+ def _request_api():
+ try:
+ func(*args)
+ raise loopingcall.LoopingCallDone()
+ except ironic_exception.HTTPConflict:
+ pass
+
+ if self.tries >= CONF.ironic.api_max_retries:
+ raise MaximumRetriesReached()
+ else:
+ self.tries += 1
+
+ self.tries = 0
+ timer = loopingcall.FixedIntervalLoopingCall(_request_api)
+ timer.start(interval=CONF.ironic.api_retry_interval).wait()
+
+ def _get_client(self):
+ # TODO(deva): save and reuse existing client & auth token
+ # until it expires or is no longer valid
+ ctx = nova_context.get_admin_context()
+
+ if ctx.auth_token is None:
+ kwargs = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public'}
+ else:
+ kwargs = {'os_auth_token': ctx.auth_token,
+ 'ironic_url': CONF.ironic.api_endpoint}
+ return ironic_client.get_client(CONF.ironic.api_version, **kwargs)
+
+ def _require_node(self, instance):
+ """Get a node's uuid out of a manager instance dict.
+
+ The compute manager is meant to know the node uuid, so a missing uuid
+ is a significant issue - it may mean we've been passed someone else's
+ data.
+ """
+ node_uuid = instance.get('node')
+ if not node_uuid:
+ raise exception.NovaException(
+ _("Ironic node uuid not supplied to driver for %r")
+ % instance['uuid'])
+ return node_uuid
+
+ def _node_resource(self, node):
+ # TODO(deva): refactor this to match ironic node datastruct
+ vcpus_used = 0
+ memory_mb_used = 0
+ local_gb_used = 0
+
+ vcpus = int(node.properties.get('cpus', 0))
+ memory_mb = int(node.properties.get('memory_mb', 0))
+ local_gb = int(node.properties.get('local_gb', 0))
+ cpu_arch = str(node.properties.get('cpu_arch', 'NotFound'))
+ nodes_extra_specs = self.extra_specs
+ nodes_extra_specs['cpu_arch'] = cpu_arch
+
+ if node.instance_uuid:
+ vcpus_used = vcpus
+ memory_mb_used = memory_mb
+ local_gb_used = local_gb
+
+ dic = {'vcpus': vcpus,
+ 'memory_mb': memory_mb,
+ 'local_gb': local_gb,
+ 'vcpus_used': vcpus_used,
+ 'memory_mb_used': memory_mb_used,
+ 'local_gb_used': local_gb_used,
+ 'hypervisor_type': self.get_hypervisor_type(),
+ 'hypervisor_version': self.get_hypervisor_version(),
+ 'hypervisor_hostname': str(node.uuid),
+ 'cpu_info': 'baremetal cpu',
+ 'supported_instances': jsonutils.dumps(
+ _get_nodes_supported_instances(cpu_arch)),
+ 'stats': jsonutils.dumps(nodes_extra_specs)
+ }
+ return dic
+
+ def _start_firewall(self, instance, network_info):
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance, network_info)
+ self.firewall_driver.apply_instance_filter(instance, network_info)
+
+ def _stop_firewall(self, instance, network_info):
+ self.firewall_driver.unfilter_instance(instance, network_info)
+
+ def _add_driver_fields(self, node, instance, image_meta, flavor=None):
+ icli = self._get_client()
+ if 'pxe' in node.driver:
+ # add required fields
+ pxe_fields = importutils.import_class(
+ 'ironic.nova.virt.ironic.ironic_driver_fields.PXE')
+
+ patch = []
+ for field in pxe_fields.required:
+ path_to_add = "%s/%s" % (field['ironic_path'],
+ field['ironic_variable'])
+ patch = [{'op': 'add',
+ 'path': path_to_add,
+ 'value': unicode(_get_required_value(
+ eval(field['nova_object']),
+ field['object_field']))}]
+ try:
+ self._retry_on_conflict(icli.node.update, node.uuid, patch)
+ except MaximumRetriesReached:
+ msg = (_("Adding the parameter %(param)s on node %(node)s "
+ "failed after %(retries)d retries")
+ % {'param': path_to_add, 'node': node.uuid,
+ 'retries': CONF.ironic.api_max_retries})
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+
+ def _cleanup_deploy(self, node, instance, network_info):
+ icli = self._get_client()
+ if 'pxe' in node.driver:
+ # add required fields
+ pxe_fields = importutils.import_class(
+ 'ironic.nova.virt.ironic.ironic_driver_fields.PXE')
+
+ patch = []
+ for field in pxe_fields.required:
+ path_to_remove = "%s/%s" % (field['ironic_path'],
+ field['ironic_variable'])
+ patch = [{'op': 'remove', 'path': path_to_remove}]
+
+ try:
+ self._retry_on_conflict(icli.node.update, node.uuid, patch)
+ except MaximumRetriesReached:
+ LOG.warning(_("Removing the parameter %(param)s on node "
+ "%(node)s failed after %(retries)d retries")
+ % {'param': path_to_remove, 'node': node.uuid,
+ 'retries': CONF.ironic.api_max_retries})
+ except ironic_exception.HTTPBadRequest:
+ pass
+
+ self.unplug_vifs(instance, network_info)
+ self._stop_firewall(instance, network_info)
+
+ @classmethod
+ def instance(cls):
+ if not hasattr(cls, '_instance'):
+ cls._instance = cls()
+ return cls._instance
+
+ def init_host(self, host):
+ return
+
+ def get_hypervisor_type(self):
+ return 'ironic'
+
+ def get_hypervisor_version(self):
+ return CONF.ironic.api_version
+
+ def list_instances(self):
+ try:
+ icli = self._get_client()
+ except ironic_exception.Unauthorized:
+ LOG.error(_("Unable to authenticate Ironic client."))
+ return []
+
+ instances = [i for i in icli.node.list() if i.instance_uuid]
+ return instances
+
+ def get_available_nodes(self, refresh=False):
+ nodes = []
+ icli = self._get_client()
+ node_list = icli.node.list()
+
+ for n in node_list:
+ # For now we'll use the node's power state: if power_state is None,
+ # we'll assume it is not ready to be presented to Nova.
+ if n.power_state:
+ nodes.append(n.uuid)
+
+ LOG.debug("Returning Nodes: %s" % nodes)
+ return nodes
+
+ def get_available_resource(self, node):
+ """Retrieve resource information.
+
+ This method is called when nova-compute launches, and
+ as part of a periodic task that records the results in the DB.
+
+ :param node: the uuid of the node
+ :returns: dictionary describing resources
+
+ """
+ icli = self._get_client()
+ node = icli.node.get(node)
+ return self._node_resource(node)
+
+ def get_info(self, instance):
+
+ icli = self._get_client()
+ try:
+ node = icli.node.get_by_instance_uuid(instance['uuid'])
+ except ironic_exception.HTTPNotFound:
+ return {'state': map_power_state(ironic_states.NOSTATE),
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 0,
+ 'cpu_time': 0
+ }
+
+ return {'state': map_power_state(node.power_state),
+ 'max_mem': node.properties.get('memory_mb'),
+ 'mem': node.properties.get('memory_mb'),
+ 'num_cpu': node.properties.get('cpus'),
+ 'cpu_time': 0
+ }
+
+ def macs_for_instance(self, instance):
+ icli = self._get_client()
+ try:
+ node = icli.node.get(instance['node'])
+ except ironic_exception.HTTPNotFound:
+ return []
+ ports = icli.node.list_ports(node.uuid)
+ return [p.address for p in ports]
+
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ node_uuid = self._require_node(instance)
+
+ icli = self._get_client()
+ node = icli.node.get(node_uuid)
+
+ # Associate the node to this instance
+ try:
+ # FIXME: this SHOULD function as a lock, so no other instance
+ # can be associated to this node. BUT IT DOESN'T!
+ patch = [{'op': 'replace',
+ 'path': '/instance_uuid',
+ 'value': instance['uuid']}]
+ self._retry_on_conflict(icli.node.update, node_uuid, patch)
+ except (ironic_exception.HTTPBadRequest, MaximumRetriesReached):
+ msg = _("Unable to set instance UUID for node %s") % node_uuid
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+
+ # Set the image id and other driver info so we can pass it down to
+ # Ironic; the required fields are defined in ironic_driver_fields.
+ flavor = self.virtapi.flavor_get(context, instance['instance_type_id'])
+ self._add_driver_fields(node, instance, image_meta, flavor)
+
+ # validate we are ready to do the deploy
+ validate_chk = icli.node.validate(node_uuid)
+ if not validate_chk.deploy or not validate_chk.power:
+ # something is wrong; undo what we have done
+ self._cleanup_deploy(node, instance, network_info)
+ raise exception.ValidationError(_(
+ "Ironic node: %(id)s failed to validate."
+ " (deploy: %(deploy)s, power: %(power)s)")
+ % {'id': node.uuid,
+ 'deploy': validate_chk.deploy,
+ 'power': validate_chk.power})
+
+ # prepare for the deploy
+ try:
+ self.plug_vifs(instance, network_info)
+ self._start_firewall(instance, network_info)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Error preparing deploy for instance %(instance)s "
+ "on baremetal node %(node)s.") %
+ {'instance': instance['uuid'],
+ 'node': node_uuid})
+ self._cleanup_deploy(node, instance, network_info)
+
+ # trigger the node deploy
+ try:
+ self._retry_on_conflict(icli.node.set_provision_state,
+ node_uuid, 'active')
+ except MaximumRetriesReached:
+ msg = (_("Error triggering the node %s to start the deployment")
+ % node_uuid)
+ LOG.error(msg)
+ self._cleanup_deploy(node, instance, network_info)
+ raise exception.NovaException(msg)
+
+ def destroy(self, context, instance, network_info,
+ block_device_info=None):
+
+ node_uuid = self._require_node(instance)
+ icli = self._get_client()
+
+ # do node tear down and wait for the state change
+ try:
+ self._retry_on_conflict(icli.node.set_provision_state,
+ node_uuid, 'deleted')
+ except MaximumRetriesReached:
+ msg = (_("Error triggering the unprovisioning of the node %s")
+ % node_uuid)
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+
+ def _wait_for_provision_state(node_uuid):
+ node = icli.node.get(node_uuid)
+ if not node.provision_state:
+ raise loopingcall.LoopingCallDone()
+
+ if self.tries >= CONF.ironic.api_max_retries:
+ msg = (_("Error unprovisioning the node %(node)s: Provision "
+ "state still marked as '%(state)s'")
+ % {'state': node.provision_state, 'node': node_uuid})
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+ else:
+ self.tries += 1
+
+ self.tries = 0
+ timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state,
+ node_uuid)
+ timer.start(interval=CONF.ironic.api_retry_interval).wait()
+
+ node = icli.node.get(node_uuid)
+
+ # remove the instance uuid
+ patch = [{'op': 'remove', 'path': '/instance_uuid'}]
+ try:
+ self._retry_on_conflict(icli.node.update, node_uuid, patch)
+ except MaximumRetriesReached:
+ msg = (_("Failed to unassociate the instance %(instance)s "
+ "with node %(node)s") % {'instance': instance['uuid'],
+ 'node': node_uuid})
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+ except ironic_exception.HTTPBadRequest:
+ pass
+
+ self._cleanup_deploy(node, instance, network_info)
+
+ def reboot(self, context, instance, network_info, reboot_type,
+ block_device_info=None, bad_volumes_callback=None):
+ pass
+
+ def power_off(self, instance, node=None):
+ # TODO(nobodycam): check the current power state first.
+ node_uuid = self._require_node(instance)
+ icli = self._get_client()
+ icli.node.set_power_state(node_uuid, 'off')
+
+ def power_on(self, context, instance, network_info, block_device_info=None,
+ node=None):
+ # TODO(nobodycam): check the current power state first.
+ node_uuid = self._require_node(instance)
+ icli = self._get_client()
+ icli.node.set_power_state(node_uuid, 'on')
+
+ def get_host_stats(self, refresh=False):
+ caps = []
+ icli = self._get_client()
+
+ for node in icli.node.list():
+ res = self._node_resource(node)
+ nodename = str(node.uuid)
+ cpu_arch = str(node.properties.get('cpu_arch', 'NotFound'))
+
+ nodes_extra_specs = self.extra_specs
+ nodes_extra_specs['cpu_arch'] = cpu_arch
+ data = {}
+ data['vcpus'] = res['vcpus']
+ data['vcpus_used'] = res['vcpus_used']
+ data['cpu_info'] = res['cpu_info']
+ data['disk_total'] = res['local_gb']
+ data['disk_used'] = res['local_gb_used']
+ data['disk_available'] = res['local_gb'] - res['local_gb_used']
+ data['host_memory_total'] = res['memory_mb']
+ data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used']
+ data['hypervisor_type'] = res['hypervisor_type']
+ data['hypervisor_version'] = res['hypervisor_version']
+ data['supported_instances'] = _get_nodes_supported_instances(
+ cpu_arch)
+ data.update(nodes_extra_specs)
+ data['host'] = CONF.host
+ data['hypervisor_hostname'] = nodename
+ data['node'] = nodename
+ caps.append(data)
+ return caps
+
+ def manage_image_cache(self, context, all_instances):
+ pass
+
+ def get_console_output(self, instance):
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ pass
+
+ def refresh_security_group_members(self, security_group_id):
+ pass
+
+ def refresh_provider_fw_rules(self):
+ pass
+
+ def refresh_instance_security_rules(self, instance):
+ pass
+
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
+ pass
+
+ def unfilter_instance(self, instance_ref, network_info):
+ pass
+
+ def plug_vifs(self, instance, network_info):
+ LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(network_info)s")
+ % {'uuid': instance['uuid'], 'network_info': network_info})
+ # start by ensuring the ports are clear
+ self.unplug_vifs(instance, network_info)
+
+ icli = self._get_client()
+ node = icli.node.get(instance['node'])
+ ports = icli.node.list_ports(node.uuid)
+
+ if len(network_info) > len(ports):
+ raise exception.NovaException(_(
+ "Ironic node: %(id)s virtual to physical interface count"
+ " missmatch"
+ " (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
+ % {'id': node.uuid,
+ 'vif_count': len(network_info),
+ 'pif_count': len(ports)})
+
+ if len(network_info) > 0:
+ # not needed if no vifs are defined
+ for vif, pif in zip(network_info, ports):
+ # attach what neutron needs directly to the port
+ port_id = unicode(vif['id'])
+ patch = [{'op': 'add',
+ 'path': '/extra/vif_port_id',
+ 'value': port_id}]
+ try:
+ self._retry_on_conflict(icli.port.update, pif.uuid, patch)
+ except MaximumRetriesReached:
+ msg = (_("Failed to set the VIF networking for port %s")
+ % pif.uuid)
+ raise exception.NovaException(msg)
+
+ def unplug_vifs(self, instance, network_info):
+ LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(network_info)s")
+ % {'uuid': instance['uuid'], 'network_info': network_info})
+ if network_info and len(network_info) > 0:
+ icli = self._get_client()
+ node = icli.node.get(instance['node'])
+ ports = icli.node.list_ports(node.uuid)
+
+ # not needed if no vifs are defined
+ for vif, pif in zip(network_info, ports):
+ # we can not attach a dict directly
+ patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
+ try:
+ self._retry_on_conflict(icli.port.update, pif.uuid, patch)
+ except MaximumRetriesReached:
+ msg = (_("Failed to remove the VIF networking for port %s")
+ % pif.uuid)
+ LOG.warning(msg)
+ except ironic_exception.HTTPBadRequest:
+ pass
diff --git a/ironic/nova/virt/ironic/ironic_driver_fields.py b/ironic/nova/virt/ironic/ironic_driver_fields.py
new file mode 100644
index 000000000..434c2a66c
--- /dev/null
+++ b/ironic/nova/virt/ironic/ironic_driver_fields.py
@@ -0,0 +1,56 @@
+# coding=utf-8
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Values should be a list of dicts, where each dict looks like:
+# {'ironic_path': '/driver_info', 'ironic_variable': 'pxe_image_source',
+#  'nova_object': 'image_meta', 'object_field': 'id'}
+
+"""
+Ironic driver required info mapping.
+"""
+
+
+class FAKE(object):
+ """Required and optional field list for ironic's FAKE driver."""
+ required = []
+ optional = []
+
+
+class PXE(object):
+ """Required and optional field list for ironic's PXE driver."""
+ required = [
+ {'ironic_path': '/driver_info',
+ 'ironic_variable': 'pxe_image_source',
+ 'nova_object': 'image_meta',
+ 'object_field': 'id'},
+ {'ironic_path': '/driver_info',
+ 'ironic_variable': 'pxe_root_gb',
+ 'nova_object': 'instance',
+ 'object_field': 'root_gb'},
+ {'ironic_path': '/driver_info',
+ 'ironic_variable': 'pxe_deploy_kernel',
+ 'nova_object': 'flavor',
+ 'object_field': 'extra_specs/'
+ 'baremetal:deploy_kernel_id'},
+ {'ironic_path': '/driver_info',
+ 'ironic_variable': 'pxe_deploy_ramdisk',
+ 'nova_object': 'flavor',
+ 'object_field': 'extra_specs/'
+ 'baremetal:deploy_ramdisk_id'}
+ ]
+
+ optional = []
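+
+# Illustrative example: IronicDriver._add_driver_fields turns the first PXE
+# entry above into a patch roughly like
+#     [{'op': 'add', 'path': '/driver_info/pxe_image_source',
+#       'value': image_meta['id']}]
+# and applies it with icli.node.update(node.uuid, patch).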
diff --git a/ironic/nova/virt/ironic/ironic_states.py b/ironic/nova/virt/ironic/ironic_states.py
new file mode 100644
index 000000000..62e044859
--- /dev/null
+++ b/ironic/nova/virt/ironic/ironic_states.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+#
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Mapping of bare metal node states.
+
+A node may have empty {} `properties` and `driver_info`, in which case it is
+said to be "initialized" but "not available", and the state is NOSTATE.
+
+When updating `properties`, any data will be rejected if the data fails to be
+validated by the driver. Any node with non-empty `properties` is said to be
+"initialized", and the state is INIT.
+
+When the driver has received both `properties` and `driver_info`, it will check
+the power status of the node and update the `power_state` accordingly. If the
+driver fails to read the power state from the node, it will reject the
+`driver_info` change, and the state will remain as INIT. If the power status
+check succeeds, `power_state` will change to one of POWER_ON or POWER_OFF,
+accordingly.
+
+At this point, the power state may be changed via the API, a console
+may be started, and a tenant may be associated.
+
+The `power_state` for a node always represents the current power state. Any
+power operation sets this to the actual state when done (whether successful or
+not). It is set to ERROR only when unable to get the power state from a node.
+
+When `instance_uuid` is set to a non-empty / non-None value, the node is said
+to be "associated" with a tenant.
+
+An associated node can not be deleted.
+
+The `instance_uuid` field may be unset only if the node is in POWER_OFF or
+ERROR states.
+"""
+
+NOSTATE = None
+INIT = 'initializing'
+ACTIVE = 'active'
+BUILDING = 'building'
+DEPLOYING = 'deploying'
+DEPLOYFAIL = 'deploy failed'
+DEPLOYDONE = 'deploy complete'
+DELETING = 'deleting'
+DELETED = 'deleted'
+ERROR = 'error'
+
+POWER_ON = 'power on'
+POWER_OFF = 'power off'
+REBOOT = 'rebooting'
+SUSPEND = 'suspended'
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
index a7ff74c70..36cc3f264 100755
--- a/tools/config/generate_sample.sh
+++ b/tools/config/generate_sample.sh
@@ -86,7 +86,7 @@ fi
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
-FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
+FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" ! -path "*/nova/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
RC_FILE="`dirname $0`/oslo.config.generator.rc"