author     Sean Dague <sean@dague.net>  2014-11-07 14:27:03 +0100
committer  Sean Dague <sean@dague.net>  2014-11-12 15:31:08 -0500
commit     89cd6a0c493e26b5a9e017c99d731464292abbaf (patch)
tree       c2bf790d1684cd539b820247113492495123a163 /nova/tests/unit/scheduler
parent     5c8bbaafef590e4d346a03051a0ba55c8be26c5c (diff)
download   nova-89cd6a0c493e26b5a9e017c99d731464292abbaf.tar.gz
move all tests to nova/tests/unit
As part of the split of functional and unit tests we need to isolate
the unit tests into a separate directory for having multiple test
targets in a sane way.

Part of bp:functional-tests-for-nova

Change-Id: Id42ba373c1bda6a312b673ab2b489ca56da8c628
Diffstat (limited to 'nova/tests/unit/scheduler')
-rw-r--r--  nova/tests/unit/scheduler/__init__.py  0
-rw-r--r--  nova/tests/unit/scheduler/fakes.py  268
-rw-r--r--  nova/tests/unit/scheduler/filters/__init__.py  0
-rw-r--r--  nova/tests/unit/scheduler/filters/test_affinity_filters.py  258
-rw-r--r--  nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py  98
-rw-r--r--  nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py  72
-rw-r--r--  nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py  53
-rw-r--r--  nova/tests/unit/scheduler/filters/test_availability_zone_filters.py  48
-rw-r--r--  nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py  99
-rw-r--r--  nova/tests/unit/scheduler/filters/test_compute_filters.py  50
-rw-r--r--  nova/tests/unit/scheduler/filters/test_core_filters.py  87
-rw-r--r--  nova/tests/unit/scheduler/filters/test_disk_filters.py  100
-rw-r--r--  nova/tests/unit/scheduler/filters/test_extra_specs_ops.py  200
-rw-r--r--  nova/tests/unit/scheduler/filters/test_image_props_filters.py  189
-rw-r--r--  nova/tests/unit/scheduler/filters/test_io_ops_filters.py  63
-rw-r--r--  nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py  90
-rw-r--r--  nova/tests/unit/scheduler/filters/test_json_filters.py  289
-rw-r--r--  nova/tests/unit/scheduler/filters/test_metrics_filters.py  34
-rw-r--r--  nova/tests/unit/scheduler/filters/test_num_instances_filters.py  63
-rw-r--r--  nova/tests/unit/scheduler/filters/test_numa_topology_filters.py  151
-rw-r--r--  nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py  67
-rw-r--r--  nova/tests/unit/scheduler/filters/test_ram_filters.py  89
-rw-r--r--  nova/tests/unit/scheduler/filters/test_retry_filters.py  46
-rw-r--r--  nova/tests/unit/scheduler/filters/test_trusted_filters.py  203
-rw-r--r--  nova/tests/unit/scheduler/filters/test_type_filters.py  56
-rw-r--r--  nova/tests/unit/scheduler/ironic_fakes.py  75
-rw-r--r--  nova/tests/unit/scheduler/test_baremetal_host_manager.py  81
-rw-r--r--  nova/tests/unit/scheduler/test_caching_scheduler.py  199
-rw-r--r--  nova/tests/unit/scheduler/test_chance_scheduler.py  182
-rw-r--r--  nova/tests/unit/scheduler/test_client.py  113
-rw-r--r--  nova/tests/unit/scheduler/test_filter_scheduler.py  596
-rw-r--r--  nova/tests/unit/scheduler/test_filters.py  206
-rw-r--r--  nova/tests/unit/scheduler/test_filters_utils.py  44
-rw-r--r--  nova/tests/unit/scheduler/test_host_filters.py  38
-rw-r--r--  nova/tests/unit/scheduler/test_host_manager.py  545
-rw-r--r--  nova/tests/unit/scheduler/test_ironic_host_manager.py  430
-rw-r--r--  nova/tests/unit/scheduler/test_rpcapi.py  69
-rw-r--r--  nova/tests/unit/scheduler/test_scheduler.py  378
-rw-r--r--  nova/tests/unit/scheduler/test_scheduler_options.py  138
-rw-r--r--  nova/tests/unit/scheduler/test_scheduler_utils.py  314
-rw-r--r--  nova/tests/unit/scheduler/test_weights.py  338
41 files changed, 6419 insertions, 0 deletions
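
For consumers of these modules, the practical effect of the move is an
import-path change; a minimal sketch (the old location, nova/tests/scheduler,
is inferred from the commit message rather than shown in this diff):

    # before (assumed): from nova.tests.scheduler import fakes
    # after this commit:
    from nova.tests.unit.scheduler import fakes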
diff --git a/nova/tests/unit/scheduler/__init__.py b/nova/tests/unit/scheduler/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/__init__.py
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
new file mode 100644
index 0000000000..d1b2918d33
--- /dev/null
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -0,0 +1,268 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Scheduler tests.
+"""
+
+import mox
+from oslo.serialization import jsonutils
+
+from nova.compute import vm_states
+from nova import db
+from nova.scheduler import filter_scheduler
+from nova.scheduler import host_manager
+from nova.virt import hardware
+
+NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(
+ 0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellUsage(
+ 1, set([3, 4]), 512)])
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=None, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=NUMA_TOPOLOGY.to_json()),
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8888, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
+]
+
+COMPUTE_NODES_METRICS = [
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=512, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 512,
+ 'timestamp': None,
+ 'source': 'host1'
+ },
+ {'name': 'bar',
+ 'value': 1.0,
+ 'timestamp': None,
+ 'source': 'host1'
+ },
+ ])),
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 1024,
+ 'timestamp': None,
+ 'source': 'host2'
+ },
+ {'name': 'bar',
+ 'value': 2.0,
+ 'timestamp': None,
+ 'source': 'host2'
+ },
+ ])),
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 3072,
+ 'timestamp': None,
+ 'source': 'host3'
+ },
+ {'name': 'bar',
+ 'value': 1.0,
+ 'timestamp': None,
+ 'source': 'host3'
+ },
+ ])),
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8192, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 8192,
+ 'timestamp': None,
+ 'source': 'host4'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host4'
+ },
+ ])),
+ dict(id=5, local_gb=768, memory_mb=768, vcpus=8,
+ disk_available_least=768, free_ram_mb=768, vcpus_used=0,
+ free_disk_gb=768, local_gb_used=0, updated_at=None,
+ service=dict(host='host5', disabled=False),
+ hypervisor_hostname='node5', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 768,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ {'name': 'zot',
+ 'value': 1,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ ])),
+ dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8,
+ disk_available_least=2048, free_ram_mb=2048, vcpus_used=0,
+ free_disk_gb=2048, local_gb_used=0, updated_at=None,
+ service=dict(host='host6', disabled=False),
+ hypervisor_hostname='node6', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 2048,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ {'name': 'zot',
+ 'value': 2,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ ])),
+]
+
+INSTANCES = [
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host1', node='node1'),
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host2', node='node2'),
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host2', node='node2'),
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host='host3', node='node3'),
+ # Broken host
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host=None),
+ # No matching host
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host='host5', node='node5'),
+]
+
+
+class FakeFilterScheduler(filter_scheduler.FilterScheduler):
+ def __init__(self, *args, **kwargs):
+ super(FakeFilterScheduler, self).__init__(*args, **kwargs)
+ self.host_manager = host_manager.HostManager()
+
+
+class FakeHostManager(host_manager.HostManager):
+ """host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
+ host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
+ host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
+ host4: free_ram_mb=8192 free_disk_gb=8192
+ """
+
+ def __init__(self):
+ super(FakeHostManager, self).__init__()
+
+ self.service_states = {
+ 'host1': {
+ 'compute': {'host_memory_free': 1073741824},
+ },
+ 'host2': {
+ 'compute': {'host_memory_free': 2147483648},
+ },
+ 'host3': {
+ 'compute': {'host_memory_free': 3221225472},
+ },
+ 'host4': {
+ 'compute': {'host_memory_free': 999999999},
+ },
+ }
+
+
+class FakeHostState(host_manager.HostState):
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
+ for (key, val) in attribute_dict.iteritems():
+ setattr(self, key, val)
+
+
+class FakeInstance(object):
+ def __init__(self, context=None, params=None):
+ """Create a test instance. Returns uuid."""
+ self.context = context
+
+ i = self._create_fake_instance(params=params)
+ self.uuid = i['uuid']
+
+ def _create_fake_instance(self, params=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = 1
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type_id'] = 2
+ inst['ami_launch_index'] = 0
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+
+class FakeComputeAPI(object):
+ def create_db_entry_for_new_instance(self, *args, **kwargs):
+ pass
+
+
+def mox_host_manager_db_calls(mock, context):
+ mock.StubOutWithMock(db, 'compute_node_get_all')
+
+ db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
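
The fakes above are the shared fixtures for the filter tests that follow. A
hedged sketch of the common pattern they enable, pairing FakeHostState with a
real filter (RamFilter is an illustrative choice mirroring test_ram_filters.py
from the diffstat, and relies on its default allocation ratio):

    from nova.scheduler.filters import ram_filter
    from nova.tests.unit.scheduler import fakes

    # Build a host with known capacity, then assert on host_passes().
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 1024,
                                'total_usable_ram_mb': 1024})
    filt = ram_filter.RamFilter()
    props = {'instance_type': {'memory_mb': 512}}
    assert filt.host_passes(host, props)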
diff --git a/nova/tests/unit/scheduler/filters/__init__.py b/nova/tests/unit/scheduler/filters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/__init__.py
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
new file mode 100644
index 0000000000..d47d10a57d
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.scheduler.filters import affinity_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestDifferentHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDifferentHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.DifferentHostFilter()
+
+ def test_affinity_different_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.instances]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestSameHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSameHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.SameHostFilter()
+
+ def test_affinity_same_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSimpleCIDRAffinityFilter, self).setUp()
+ self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
+
+ def test_affinity_simple_cidr_filter_passes(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/24',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_fails(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/32',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_handles_none(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ affinity_ip = CONF.my_ip.split('.')[0:3]
+ affinity_ip.append('100')
+ affinity_ip = str.join('.', affinity_ip)
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+
+class TestGroupAffinityFilter(test.NoDBTestCase):
+
+ def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': [policy]}
+ filter_properties['group_hosts'] = []
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties['group_hosts'] = ['host2']
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_passes_legacy(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_fails_legacy(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['anti-affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity'],
+ 'group_hosts': ['host1']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_passes(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_passes_legacy(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
+
+ def _test_group_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host2']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_fails(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_fails_legacy(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
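
The assert_called_once_with() checks above pin down the exact compute API
query the filter is expected to issue. A hedged sketch of the behavior under
test, written as a standalone function rather than the actual nova source:

    import six

    def different_host_passes(compute_api, context, host_state,
                              filter_properties):
        # Pass unless an instance named in the 'different_host' hint
        # already runs on this host.
        hints = filter_properties.get('scheduler_hints') or {}
        affinity_uuids = hints.get('different_host', [])
        if isinstance(affinity_uuids, six.string_types):
            affinity_uuids = [affinity_uuids]
        if not affinity_uuids:
            return True
        instances = compute_api.get_all(
            context, {'host': host_state.host,
                      'uuid': affinity_uuids,
                      'deleted': False})
        return not instances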
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
new file mode 100644
index 0000000000..b4eacf321f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -0,0 +1,98 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggImagePropsIsolationFilter, self).setUp()
+ self.filt_cls = aipi.AggregateImagePropertiesIsolation()
+
+ def test_aggregate_image_properties_isolation_passes(self, agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_multi_props_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar2'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
+ agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'no-bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_namespace(self,
+ agg_mock):
+ self.flags(aggregate_image_properties_isolation_namespace="np")
+ agg_mock.return_value = {'np.foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'np.foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
new file mode 100644
index 0000000000..4512841062
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -0,0 +1,72 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
+ self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
+
+ def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
+ capabilities = {'opt1': 1, 'opt2': 2}
+
+ filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
+ {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(agg_mock.called)
+
+ def _do_test_aggregate_filter_extra_specs(self, especs, passes):
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ # Un-scoped extra spec
+ 'opt1': '1',
+ # Scoped extra spec that applies to this filter
+ 'aggregate_instance_extra_specs:opt2': '2',
+ # Scoped extra spec that does not apply to this filter
+ 'trust:trusted_host': 'true',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
+ agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
+ especs = {
+ # Un-scoped extra spec, make sure we don't blow up if it
+ # happens to match our scope.
+ 'aggregate_instance_extra_specs': '1',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ 'opt1': '1',
+ 'opt2': '222',
+ 'trust:trusted_host': 'true'
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=False)
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
new file mode 100644
index 0000000000..70fe5e2d41
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateMultitenancyIsolationFilter, self).setUp()
+ self.filt_cls = ami.AggregateMultiTenancyIsolation()
+
+ def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'my_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'other_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_no_meta_passes(self, agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
new file mode 100644
index 0000000000..3cf860dfb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import availability_zone_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAvailabilityZoneFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAvailabilityZoneFilter, self).setUp()
+ self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
+
+ @staticmethod
+ def _make_zone_request(zone):
+ return {
+ 'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'availability_zone': zone
+ }
+ }
+ }
+
+ def test_availability_zone_filter_same(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('nova')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, request))
+
+ def test_availability_zone_filter_different(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('bad')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, request))
diff --git a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
new file mode 100644
index 0000000000..506b207d2a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
@@ -0,0 +1,99 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from nova.scheduler.filters import compute_capabilities_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestComputeCapabilitiesFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestComputeCapabilitiesFilter, self).setUp()
+ self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()
+
+ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
+ # In a real OpenStack runtime environment, compute capability
+ # values may be numbers, so use numbers in this unit test.
+ capabilities = {}
+ capabilities.update(ecaps)
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'extra_specs': especs}}
+ host_state = {'free_ram_mb': 1024}
+ host_state.update(capabilities)
+ host = fakes.FakeHostState('host1', 'node1', host_state)
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_pass_cpu_info_as_text_type(self):
+ cpu_info = """ { "vendor": "Intel", "model": "core2duo",
+ "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
+ {"cores": 1, "threads":1, "sockets": 1}} """
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=True)
+
+ def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
+ cpu_info = "cpu_info"
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=False)
+
+ def test_compute_filter_passes_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_fails_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
+ passes=False)
+
+ def test_compute_filter_pass_extra_specs_simple_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'capabilities:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_pass_extra_specs_same_as_scope(self):
+ # Make sure this still works even if the key is the same as the scope
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'capabilities': 1},
+ especs={'capabilities': '1'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 1, 'opt2': 2},
+ especs={'wrong_scope:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
+ especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
+ 'trust:trusted_host': 'true'},
+ passes=True)
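
The scoped keys above (e.g. 'capabilities:opt1:b:aa') encode a path into
nested capability data. A hedged sketch of the traversal these tests exercise
(plain dict walking, not the filter's actual implementation):

    caps = {'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}}
    key = 'capabilities:opt1:b:aa'
    parts = key.split(':')[1:]      # drop the 'capabilities' scope
    value = caps['stats']
    for part in parts:              # walk opt1 -> b -> aa
        value = value.get(part)
    assert value == 2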
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
new file mode 100644
index 0000000000..7e31e1ef5a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.servicegroup.API.service_is_up')
+class TestComputeFilter(test.NoDBTestCase):
+
+ def test_compute_filter_manual_disable(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': True}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(service_up_mock.called)
+
+ def test_compute_filter_sgapi_passes(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ service_up_mock.return_value = True
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ service_up_mock.assert_called_once_with(service)
+
+ def test_compute_filter_sgapi_fails(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': False, 'updated_at': 'now'}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ service_up_mock.return_value = False
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ service_up_mock.assert_called_once_with(service)
diff --git a/nova/tests/unit/scheduler/filters/test_core_filters.py b/nova/tests/unit/scheduler/filters/test_core_filters.py
new file mode 100644
index 0000000000..cfe2c51be6
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_core_filters.py
@@ -0,0 +1,87 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import core_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestCoreFilter(test.NoDBTestCase):
+
+ def test_core_filter_passes(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 7})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_core_filter_fails_safe(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_core_filter_fails(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_value_error(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 7})
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'cpu_allocation_ratio')
+ self.assertEqual(4 * 2, host.limits['vcpu'])
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_default_value(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ agg_mock.return_value = set([])
+ # False: fallback to default flag w/o aggregates
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'cpu_allocation_ratio')
+ # True: use ratio from aggregates
+ agg_mock.return_value = set(['3'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 3, host.limits['vcpu'])
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_conflict_values(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=1)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ agg_mock.return_value = set(['2', '3'])
+ # use the minimum ratio from aggregates
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 2, host.limits['vcpu'])
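
The CoreFilter cases above reduce to one inequality: requested vCPUs must fit
under vcpus_total * cpu_allocation_ratio minus what is already used. Worked
with the numbers from these tests:

    vcpus_total = 4
    cpu_allocation_ratio = 2
    vcpu_limit = vcpus_total * cpu_allocation_ratio  # 8

    assert 7 + 1 <= vcpu_limit           # test_core_filter_passes
    assert not (8 + 1 <= vcpu_limit)     # test_core_filter_fails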
diff --git a/nova/tests/unit/scheduler/filters/test_disk_filters.py b/nova/tests/unit/scheduler/filters/test_disk_filters.py
new file mode 100644
index 0000000000..14e9328732
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_disk_filters.py
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import disk_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestDiskFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDiskFilter, self).setUp()
+
+ def test_disk_filter_passes(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1, 'swap': 512}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_fails(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 10,
+ 'ephemeral_gb': 1, 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_oversubscribe(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 18, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(12 * 10.0, host.limits['disk_gb'])
+
+ def test_disk_filter_oversubscribe_fail(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 19, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_disk_filter_value_error(self, agg_mock):
+ filt_cls = disk_filter.AggregateDiskFilter()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': mock.sentinel.ctx,
+ 'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1})
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'disk_allocation_ratio')
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_disk_filter_default_value(self, agg_mock):
+ filt_cls = disk_filter.AggregateDiskFilter()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': mock.sentinel.ctx,
+ 'instance_type': {'root_gb': 2,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1})
+ # Uses global conf.
+ agg_mock.return_value = set([])
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'disk_allocation_ratio')
+
+ agg_mock.return_value = set(['2'])
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
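
The "1GB used... so 119GB allowed" comments above come from this arithmetic
(swap is given in MB, everything else in GB):

    total_gb = 12
    free_gb = (11 * 1024) / 1024.0       # 11 GB free
    used_gb = total_gb - free_gb         # 1 GB already used
    limit_gb = total_gb * 10.0           # ratio 10.0 -> 120 GB limit
    usable_gb = limit_gb - used_gb       # 119 GB allowed

    assert 100 + 18 + 1 <= usable_gb          # oversubscribe passes
    assert not (100 + 19 + 1 <= usable_gb)    # oversubscribe_fail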
diff --git a/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
new file mode 100644
index 0000000000..5f8f912a81
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
@@ -0,0 +1,200 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import extra_specs_ops
+from nova import test
+
+
+class ExtraSpecsOpsTestCase(test.NoDBTestCase):
+ def _do_extra_specs_ops_test(self, value, req, matches):
+ assertion = self.assertTrue if matches else self.assertFalse
+ assertion(extra_specs_ops.match(value, req))
+
+ def test_extra_specs_matches_simple(self):
+ self._do_extra_specs_ops_test(
+ value='1',
+ req='1',
+ matches=True)
+
+ def test_extra_specs_fails_simple(self):
+ self._do_extra_specs_ops_test(
+ value='',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple2(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple3(self):
+ self._do_extra_specs_ops_test(
+ value='222',
+ req='2',
+ matches=False)
+
+ def test_extra_specs_fails_with_bogus_ops(self):
+ self._do_extra_specs_ops_test(
+ value='4',
+ req='> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_eq2(self):
+ self._do_extra_specs_ops_test(
+ value='124',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_eq3(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='=',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s== 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s== 123',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s!= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s!= 123',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sge(self):
+ self._do_extra_specs_ops_test(
+ value='1000',
+ req='s>= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sle(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s<= 1000',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sl(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='s< 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sg(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='s> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 11',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in3(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321 <in>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11 <in>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12 <or>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12 <or>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='<= 10',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='<= 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='>= 1',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='>= 3',
+ matches=False)
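
Outside the test class, the operator grammar exercised above is just
extra_specs_ops.match(value, requirement); a few calls mirroring cases from
this file:

    from nova.scheduler.filters import extra_specs_ops

    assert extra_specs_ops.match('12', '<or> 11 <or> 12')
    assert extra_specs_ops.match('123', 's== 123')
    assert not extra_specs_ops.match('2', '>= 3')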
diff --git a/nova/tests/unit/scheduler/filters/test_image_props_filters.py b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
new file mode 100644
index 0000000000..ee3a175dce
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
@@ -0,0 +1,189 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import arch
+from nova.compute import hvtype
+from nova.compute import vm_mode
+from nova.scheduler.filters import image_props_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+
+
+class TestImagePropsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestImagePropsFilter, self).setUp()
+ self.filt_cls = image_props_filter.ImagePropertiesFilter()
+
+ def test_image_properties_filter_passes_same_inst_props_and_version(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0,<6.2'
+ }}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_different_inst_props(self):
+ img_props = {'properties': {'architecture': arch.ARMV7,
+ 'hypervisor_type': hvtype.QEMU,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_different_hyper_version(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.2'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_partial_inst_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_partial_inst_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_without_inst_props(self):
+ filter_properties = {'request_spec': {}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_without_host_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'enabled': True,
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_without_hyper_version(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)]}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0'}}
+ filter_properties = {'request_spec': {'image': img_props}}
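+ # Under the same base-1000 encoding, a raw hypervisor_version of 5000
+ # corresponds to roughly version 0.5.0, well below the >=6.0 requirement.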
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': 5000}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_pv_mode_compat(self):
+ # if an old image has 'pv' for a vm_mode it should be treated as xen
+ img_props = {'properties': {'vm_mode': 'pv'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_hvm_mode_compat(self):
+ # if an old image has 'hv' for a vm_mode it should be treated as hvm
+ img_props = {'properties': {'vm_mode': 'hv'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_xen_arch_compat(self):
+ # if an old image has 'x86_32' for arch it should be treated as i686
+ img_props = {'properties': {'architecture': 'x86_32'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_xen_hvtype_compat(self):
+ # if an old image has 'xapi' for hvtype it should be treated as xen
+ img_props = {'properties': {'hypervisor_type': 'xapi'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.XEN, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_baremetal_vmmode_compat(self):
+ # if an old image has 'baremetal' for vmmode it should be
+ # treated as hvm
+ img_props = {'properties': {'vm_mode': 'baremetal'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.BAREMETAL, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
new file mode 100644
index 0000000000..c558b7711f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova.scheduler.filters import io_ops_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestIoOpsFilter(test.NoDBTestCase):
+
+ def test_filter_num_iops_passes(self):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.IoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_filter_num_iops_fails(self):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.IoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 8})
+ filter_properties = {}
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_filter_num_iops_value(self, agg_mock):
+ self.flags(max_io_ops_per_host=7)
+ self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set([])
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_io_ops_per_host')
+ agg_mock.return_value = set(['8'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_filter_num_iops_value_error(self, agg_mock):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ agg_mock.return_value = set(['XXX'])
+ filter_properties = {'context': mock.sentinel.ctx}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_io_ops_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
new file mode 100644
index 0000000000..343c86264c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
@@ -0,0 +1,90 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import isolated_hosts_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestIsolatedHostsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestIsolatedHostsFilter, self).setUp()
+ self.filt_cls = isolated_hosts_filter.IsolatedHostsFilter()
+
+ def _do_test_isolated_hosts(self, host_in_list, image_in_list,
+ set_flags=True,
+ restrict_isolated_hosts_to_isolated_images=True):
+ if set_flags:
+ self.flags(isolated_images=['isolated_image'],
+ isolated_hosts=['isolated_host'],
+ restrict_isolated_hosts_to_isolated_images=
+ restrict_isolated_hosts_to_isolated_images)
+ host_name = 'isolated_host' if host_in_list else 'free_host'
+ image_ref = 'isolated_image' if image_in_list else 'free_image'
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': {'image_ref': image_ref}
+ }
+ }
+ host = fakes.FakeHostState(host_name, 'node', {})
+ return self.filt_cls.host_passes(host, filter_properties)
+
+ def test_isolated_hosts_fails_isolated_on_non_isolated(self):
+ self.assertFalse(self._do_test_isolated_hosts(False, True))
+
+ def test_isolated_hosts_fails_non_isolated_on_isolated(self):
+ self.assertFalse(self._do_test_isolated_hosts(True, False))
+
+ def test_isolated_hosts_passes_isolated_on_isolated(self):
+ self.assertTrue(self._do_test_isolated_hosts(True, True))
+
+ def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
+ self.assertTrue(self._do_test_isolated_hosts(False, False))
+
+ def test_isolated_hosts_no_config(self):
+ # If neither isolated hosts nor isolated images are in the config, the
+ # filter should not filter at all. This is the default config.
+ self.assertTrue(self._do_test_isolated_hosts(False, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, False, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_no_hosts_config(self):
+ self.flags(isolated_images=['isolated_image'])
+ # With no isolated hosts in the config, requests using the listed
+ # isolated images are filtered out on every host
+ self.assertFalse(self._do_test_isolated_hosts(False, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, False, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_no_images_config(self):
+ self.flags(isolated_hosts=['isolated_host'])
+ # With no isolated images in the config, only the listed isolated
+ # hosts are filtered out
+ self.assertTrue(self._do_test_isolated_hosts(False, True, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, False, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_less_restrictive(self):
+ # If there are isolated hosts and non isolated images
+ self.assertTrue(self._do_test_isolated_hosts(True, False, True, False))
+ # If there are isolated hosts and isolated images
+ self.assertTrue(self._do_test_isolated_hosts(True, True, True, False))
+ # If there are non isolated hosts and non isolated images
+ self.assertTrue(self._do_test_isolated_hosts(False, False, True,
+ False))
+ # If there are non isolated hosts and isolated images
+ self.assertFalse(self._do_test_isolated_hosts(False, True, True,
+ False))
diff --git a/nova/tests/unit/scheduler/filters/test_json_filters.py b/nova/tests/unit/scheduler/filters/test_json_filters.py
new file mode 100644
index 0000000000..c5ddca7520
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_json_filters.py
@@ -0,0 +1,289 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.scheduler.filters import json_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestJsonFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestJsonFilter, self).setUp()
+ self.filt_cls = json_filter.JsonFilter()
+ self.json_query = jsonutils.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024]])
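+ # i.e. require at least 1024 MB of free RAM and 200 GB
+ # (200 * 1024 MB) of free disk.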
+
+ def test_json_filter_passes(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_passes_with_no_query(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 0,
+ 'free_disk_mb': 0})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_memory(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disk(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': (200 * 1024) - 1})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_service_disabled(self):
+ json_query = jsonutils.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024],
+ ['not', '$service.disabled']])
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'scheduler_hints': {'query': json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_happy_day(self):
+ # Test json filter more thoroughly.
+ raw = ['and',
+ '$capabilities.enabled',
+ ['=', '$capabilities.opt1', 'match'],
+ ['or',
+ ['and',
+ ['<', '$free_ram_mb', 30],
+ ['<', '$free_disk_mb', 300]],
+ ['and',
+ ['>', '$free_ram_mb', 30],
+ ['>', '$free_disk_mb', 300]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 10,
+ 'free_disk_mb': 200,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities being disabled
+ capabilities = {'enabled': False, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to being exact memory/disk we don't want
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 30,
+ 'free_disk_mb': 300,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to memory lower but disk higher
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities 'opt1' not equal
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ service = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_basic_operators(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ # (operator, arguments, expected_result)
+ ops_to_test = [
+ ['=', [1, 1], True],
+ ['=', [1, 2], False],
+ ['<', [1, 2], True],
+ ['<', [1, 1], False],
+ ['<', [2, 1], False],
+ ['>', [2, 1], True],
+ ['>', [2, 2], False],
+ ['>', [2, 3], False],
+ ['<=', [1, 2], True],
+ ['<=', [1, 1], True],
+ ['<=', [2, 1], False],
+ ['>=', [2, 1], True],
+ ['>=', [2, 2], True],
+ ['>=', [2, 3], False],
+ ['in', [1, 1], True],
+ ['in', [1, 1, 2, 3], True],
+ ['in', [4, 1, 2, 3], False],
+ ['not', [True], False],
+ ['not', [False], True],
+ ['or', [True, False], True],
+ ['or', [False, False], False],
+ ['and', [True, True], True],
+ ['and', [False, False], False],
+ ['and', [True, False], False],
+ # Nested ((True or False) and (2 > 1)) == Passes
+ ['and', [['or', True, False], ['>', 2, 1]], True]]
+
+ for (op, args, expected) in ops_to_test:
+ raw = [op] + args
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertEqual(expected,
+ self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, True, False, True] and if any are True
+ # then it passes...
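+ # (Rough sketch of the presumed 'not' op in json_filter, for reference:
+ # _not = lambda args: [not arg for arg in args] -- each argument is
+ # negated and host_passes then ORs the resulting list together.)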
+ raw = ['not', True, False, True, False]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, False, False] and if any are True
+ # then it passes...which this doesn't
+ raw = ['not', True, True, True]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_operator_raises(self):
+ raw = ['!=', 1, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+ self.assertRaises(KeyError,
+ self.filt_cls.host_passes, host, filter_properties)
+
+ def test_json_filter_empty_filters_pass(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = []
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ raw = {}
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_invalid_num_arguments_fails(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['>', 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_variable_ignored(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = ['=', '$........', 1, 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['=', '$foo', 2, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_metrics_filters.py b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
new file mode 100644
index 0000000000..9ae0f6c77c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import metrics_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestMetricsFilter(test.NoDBTestCase):
+
+ def test_metrics_filter_pass(self):
+ self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
+ filt_cls = metrics_filter.MetricsFilter()
+ metrics = dict(foo=1, bar=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ attribute_dict={'metrics': metrics})
+ self.assertTrue(filt_cls.host_passes(host, None))
+
+ def test_metrics_filter_missing_metrics(self):
+ self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
+ filt_cls = metrics_filter.MetricsFilter()
+ metrics = dict(foo=1)
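+ # 'bar' is configured in weight_setting but absent from the host's
+ # metrics, so the filter must reject the host.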
+ host = fakes.FakeHostState('host1', 'node1',
+ attribute_dict={'metrics': metrics})
+ self.assertFalse(filt_cls.host_passes(host, None))
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
new file mode 100644
index 0000000000..3db0eeb6e7
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import num_instances_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestNumInstancesFilter(test.NoDBTestCase):
+
+ def test_filter_num_instances_passes(self):
+ self.flags(max_instances_per_host=5)
+ self.filt_cls = num_instances_filter.NumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 4})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_filter_num_instances_fails(self):
+ self.flags(max_instances_per_host=5)
+ self.filt_cls = num_instances_filter.NumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
+ filter_properties = {}
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_filter_aggregate_num_instances_value(self, agg_mock):
+ self.flags(max_instances_per_host=4)
+ self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set([])
+ # No aggregate defined for that host.
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_instances_per_host')
+ agg_mock.return_value = set(['6'])
+ # Aggregate defined for that host.
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_filter_aggregate_num_instances_value_error(self, agg_mock):
+ self.flags(max_instances_per_host=6)
+ self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set(['XXX'])
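+ # A non-numeric aggregate value ('XXX') makes the filter fall back
+ # to the max_instances_per_host flag.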
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_instances_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
new file mode 100644
index 0000000000..3c8eb049c8
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -0,0 +1,151 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import objects
+from nova.objects import base as obj_base
+from nova.scheduler.filters import numa_topology_filter
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.scheduler import fakes
+from nova.virt import hardware
+
+
+class TestNUMATopologyFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNUMATopologyFilter, self).setUp()
+ self.filt_cls = numa_topology_filter.NUMATopologyFilter()
+
+ def test_numa_topology_filter_pass(self):
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = None
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_fit(self):
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([2]), 512),
+ hardware.VirtNUMATopologyCellInstance(2, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_memory(self):
+ self.flags(ram_allocation_ratio=1)
+
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 1024),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_cpu(self):
+ self.flags(cpu_allocation_ratio=1)
+
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([3, 4, 5]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_pass_set_limit(self):
+ self.flags(cpu_allocation_ratio=21)
+ self.flags(ram_allocation_ratio=1.3)
+
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
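+ # fakes.NUMA_TOPOLOGY presumably exposes two pCPUs and 512 MB per host
+ # cell, so the limits are 2 * 21 = 42 CPUs and int(512 * 1.3) = 665 MB.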
+ limits_topology = hardware.VirtNUMALimitTopology.from_json(
+ host.limits['numa_topology'])
+ self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
+ self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
+ self.assertEqual(limits_topology.cells[0].memory_limit, 665)
+ self.assertEqual(limits_topology.cells[1].memory_limit, 665)
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
new file mode 100644
index 0000000000..57dd5ebc02
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import objects
+from nova.scheduler.filters import pci_passthrough_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestPCIPassthroughFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestPCIPassthroughFilter, self).setUp()
+ self.filt_cls = pci_passthrough_filter.PciPassthroughFilter()
+
+ def test_pci_passthrough_pass(self):
+ pci_stats_mock = mock.MagicMock()
+ pci_stats_mock.support_requests.return_value = True
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086'}])
+ requests = objects.InstancePCIRequests(requests=[request])
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={'pci_stats': pci_stats_mock})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests)
+
+ def test_pci_passthrough_fail(self):
+ pci_stats_mock = mock.MagicMock()
+ pci_stats_mock.support_requests.return_value = False
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086'}])
+ requests = objects.InstancePCIRequests(requests=[request])
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={'pci_stats': pci_stats_mock})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests)
+
+ def test_pci_passthrough_no_pci_request(self):
+ filter_properties = {}
+ host = fakes.FakeHostState('h1', 'n1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_pci_passthrough_compute_stats(self):
+ requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={})
+ self.assertRaises(AttributeError, self.filt_cls.host_passes,
+ host, filter_properties)
diff --git a/nova/tests/unit/scheduler/filters/test_ram_filters.py b/nova/tests/unit/scheduler/filters/test_ram_filters.py
new file mode 100644
index 0000000000..c7a6df58c9
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_ram_filters.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import ram_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestRamFilter, self).setUp()
+ self.filt_cls = ram_filter.RamFilter()
+
+ def test_ram_filter_fails_on_memory(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_passes(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_oversubscribe(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 2.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
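+ # With a 2.0 ratio the host advertises 2048 * 2.0 = 4096 MB as its
+ # memory limit, even though free_ram_mb is already negative.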
+ self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
+
+
+@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+class TestAggregateRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateRamFilter, self).setUp()
+ self.filt_cls = ram_filter.AggregateRamFilter()
+
+ def test_aggregate_ram_filter_value_error(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_default_value(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ # False: fall back to the default flag when no aggregates are defined
+ agg_mock.return_value = set()
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.return_value = set(['2.0'])
+ # True: use ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_conflict_values(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['1.5', '2.0'])
+ # use the minimum ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
diff --git a/nova/tests/unit/scheduler/filters/test_retry_filters.py b/nova/tests/unit/scheduler/filters/test_retry_filters.py
new file mode 100644
index 0000000000..04510cd419
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_retry_filters.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import retry_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestRetryFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestRetryFilter, self).setUp()
+ self.filt_cls = retry_filter.RetryFilter()
+
+ def test_retry_filter_disabled(self):
+ # Test case where retry/re-scheduling is disabled.
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_retry_filter_pass(self):
+ # Node not previously tried.
+ host = fakes.FakeHostState('host1', 'nodeX', {})
+ retry = dict(num_attempts=2,
+ hosts=[['host1', 'node1'], # same host, different node
+ ['host2', 'node2'], # different host and node
+ ])
+ filter_properties = dict(retry=retry)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_retry_filter_fail(self):
+ # Node was already tried.
+ host = fakes.FakeHostState('host1', 'node1', {})
+ retry = dict(num_attempts=1,
+ hosts=[['host1', 'node1']])
+ filter_properties = dict(retry=retry)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_trusted_filters.py b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
new file mode 100644
index 0000000000..b6afb92ae0
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
@@ -0,0 +1,203 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+import requests
+
+from nova.scheduler.filters import trusted_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+@mock.patch.object(trusted_filter.AttestationService, '_request')
+class TestTrustedFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestTrustedFilter, self).setUp()
+ # TrustedFilter's constructor creates the attestation cache, which
+ # queries the DB for the list of all compute nodes.
+ fake_compute_nodes = [
+ {'hypervisor_hostname': 'node1',
+ 'service': {'host': 'host1'},
+ }
+ ]
+ with mock.patch('nova.db.compute_node_get_all') as mocked:
+ mocked.return_value = fake_compute_nodes
+ self.filt_cls = trusted_filter.TrustedFilter()
+
+ def test_trusted_filter_default_passes(self, req_mock):
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(req_mock.called)
+
+ def test_trusted_filter_trusted_and_trusted_passes(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ req_mock.assert_called_once_with("POST", "PollHosts", ["node1"])
+
+ def test_trusted_filter_trusted_and_untrusted_fails(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_untrusted_and_trusted_fails(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_update_cache(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ req_mock.reset_mock()
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ req_mock.reset_mock()
+
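+ # Advance the clock past the attestation cache timeout so the next
+ # host_passes call must poll the attestation service again.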
+ timeutils.set_time_override(timeutils.utcnow())
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout + 80)
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertTrue(req_mock.called)
+
+ timeutils.clear_time_override()
+
+ def test_trusted_filter_update_cache_timezone(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ timeutils.set_time_override(
+ timeutils.normalize_time(
+ timeutils.parse_isotime("2012-09-09T09:10:40Z")))
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ req_mock.reset_mock()
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ req_mock.reset_mock()
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout - 10)
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ timeutils.clear_time_override()
+
+ def test_trusted_filter_combine_hosts(self, req_mock):
+ fake_compute_nodes = [
+ {'hypervisor_hostname': 'node1',
+ 'service': {'host': 'host1'},
+ },
+ {'hypervisor_hostname': 'node2',
+ 'service': {'host': 'host2'},
+ },
+ ]
+ with mock.patch('nova.db.compute_node_get_all') as mocked:
+ mocked.return_value = fake_compute_nodes
+ self.filt_cls = trusted_filter.TrustedFilter()
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+ req_mock.assert_called_once_with("POST", "PollHosts",
+ ["node1", "node2"])
+
+ def test_trusted_filter_trusted_and_locale_formatted_vtime_passes(self,
+ req_mock):
+ oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%c")},
+ {"host_name": "host2",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%D")},
+ # The %D vtime above is deliberately an unsupported
+ # format, ensuring that we're not just arbitrarily
+ # accepting any date format.
+ ]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'host1', {})
+ bad_host = fakes.FakeHostState('host2', 'host2', {})
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(self.filt_cls.host_passes(bad_host,
+ filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
new file mode 100644
index 0000000000..3aebba1a76
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import type_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestTypeFilter(test.NoDBTestCase):
+
+ @mock.patch('nova.db.instance_get_all_by_host_and_not_type')
+ def test_type_filter(self, get_mock):
+ self.filt_cls = type_filter.TypeAffinityFilter()
+
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ filter_properties = {'context': mock.MagicMock(),
+ 'instance_type': {'id': 'fake1'}}
+ get_mock.return_value = []
+ # True since no instances of a different type are on the host
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_mock.assert_called_once_with(
+ mock.ANY, # context...
+ 'fake_host',
+ 'fake1'
+ )
+ get_mock.return_value = [mock.sentinel.instances]
+ # False since the host already runs instances of a different type
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_type_filter(self, agg_mock):
+ self.filt_cls = type_filter.AggregateTypeAffinityFilter()
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake1'}}
+ filter2_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake2'}}
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ agg_mock.return_value = set(['fake1'])
+ # True since the requested type matches the aggregate metadata
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'fake_host',
+ 'instance_type')
+ # False since the requested type is not in the aggregate metadata
+ self.assertFalse(self.filt_cls.host_passes(host, filter2_properties))
diff --git a/nova/tests/unit/scheduler/ironic_fakes.py b/nova/tests/unit/scheduler/ironic_fakes.py
new file mode 100644
index 0000000000..5c63afafe3
--- /dev/null
+++ b/nova/tests/unit/scheduler/ironic_fakes.py
@@ -0,0 +1,75 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake nodes for Ironic host manager tests.
+"""
+
+from oslo.serialization import jsonutils
+
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=10, free_ram_mb=1024),
+ dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=20, free_ram_mb=2048),
+ dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=30, free_ram_mb=3072),
+ dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=40, free_ram_mb=4096),
+ # Broken entry
+ dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
+ cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=50, free_ram_mb=5120),
+]
diff --git a/nova/tests/unit/scheduler/test_baremetal_host_manager.py b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
new file mode 100644
index 0000000000..1f6e2d70fa
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For BaremetalHostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.scheduler import baremetal_host_manager
+from nova.scheduler import host_manager
+from nova import test
+
+
+class BaremetalHostManagerTestCase(test.NoDBTestCase):
+ """Test case for BaremetalHostManager class."""
+
+ def setUp(self):
+ super(BaremetalHostManagerTestCase, self).setUp()
+ self.host_manager = baremetal_host_manager.BaremetalHostManager()
+
+ def test_manager_public_api_signatures(self):
+ self.assertPublicAPISignatures(host_manager.HostManager(),
+ self.host_manager)
+
+ def test_state_public_api_signatures(self):
+ self.assertPublicAPISignatures(
+ host_manager.HostState("dummy",
+ "dummy"),
+ baremetal_host_manager.BaremetalNodeState("dummy",
+ "dummy")
+ )
+
+ @mock.patch.object(baremetal_host_manager.BaremetalNodeState, '__init__')
+ def test_create_baremetal_node_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'baremetal cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(baremetal_host_manager.BaremetalNodeState,
+ type(host_state))
+
+ @mock.patch.object(host_manager.HostState, '__init__')
+ def test_create_non_baremetal_host_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'other cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(host_manager.HostState, type(host_state))
+
+
+class BaremetalNodeStateTestCase(test.NoDBTestCase):
+ """Test case for BaremetalNodeState class."""
+
+ def test_update_from_compute_node(self):
+ stats = {'cpu_arch': 'cpu_arch'}
+ json_stats = jsonutils.dumps(stats)
+ compute_node = {'memory_mb': 1024, 'free_disk_gb': 10,
+ 'free_ram_mb': 1024, 'vcpus': 1, 'vcpus_used': 0,
+ 'stats': json_stats}
+
+ host = baremetal_host_manager.BaremetalNodeState('fakehost',
+ 'fakenode')
+ host.update_from_compute_node(compute_node)
+
+ self.assertEqual(compute_node['free_ram_mb'], host.free_ram_mb)
+ self.assertEqual(compute_node['memory_mb'], host.total_usable_ram_mb)
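+ # Compute nodes report free_disk_gb in GiB while the host state
+ # tracks free_disk_mb in MiB, hence the 1024 factor below.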
+ self.assertEqual(compute_node['free_disk_gb'] * 1024,
+ host.free_disk_mb)
+ self.assertEqual(compute_node['vcpus'], host.vcpus_total)
+ self.assertEqual(compute_node['vcpus_used'], host.vcpus_used)
+ self.assertEqual(stats, host.stats)
diff --git a/nova/tests/unit/scheduler/test_caching_scheduler.py b/nova/tests/unit/scheduler/test_caching_scheduler.py
new file mode 100644
index 0000000000..15525f1b20
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_caching_scheduler.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.scheduler import caching_scheduler
+from nova.scheduler import host_manager
+from nova.tests.unit.scheduler import test_scheduler
+
+ENABLE_PROFILER = False
+
+
+class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Caching Scheduler."""
+
+ driver_cls = caching_scheduler.CachingScheduler
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = []
+ context = mock.Mock()
+
+ self.driver.run_periodic_tasks(context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+ context.elevated.assert_called_with()
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
+ self.driver.all_host_states = []
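+ # With the cache pre-populated, the host manager should not be
+ # consulted at all.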
+
+ self.driver._get_all_host_states(self.context)
+
+ self.assertFalse(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_all_host_states(self.context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual(["asdf"], self.driver.all_host_states)
+ self.assertEqual(["asdf"], result)
+
+ def test_get_up_hosts(self):
+ with mock.patch.object(self.driver.host_manager,
+ "get_all_host_states") as mock_get_hosts:
+ mock_get_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_up_hosts(self.context)
+
+ self.assertTrue(mock_get_hosts.called)
+ self.assertEqual(mock_get_hosts.return_value, result)
+
+ def test_select_destination_raises_with_no_hosts(self):
+ fake_request_spec = self._get_fake_request_spec()
+ self.driver.all_host_states = []
+
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations,
+ self.context, fake_request_spec, {})
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destination_works(self, mock_get_extra):
+ fake_request_spec = self._get_fake_request_spec()
+ fake_host = self._get_fake_host_state()
+ self.driver.all_host_states = [fake_host]
+
+ result = self._test_select_destinations(fake_request_spec)
+
+ self.assertEqual(1, len(result))
+ self.assertEqual(result[0]["host"], fake_host.host)
+
+ def _test_select_destinations(self, request_spec):
+ return self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ def _get_fake_request_spec(self):
+ flavor = {
+ "flavorid": "small",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ }
+ instance_properties = {
+ "os_type": "linux",
+ "project_id": "1234",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ "uuid": 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ }
+ request_spec = {
+ "instance_type": flavor,
+ "instance_properties": instance_properties,
+ "num_instances": 1,
+ }
+ return request_spec
+
+ def _get_fake_host_state(self, index=0):
+ host_state = host_manager.HostState(
+ 'host_%s' % index,
+ 'node_%s' % index)
+ host_state.free_ram_mb = 50000
+ host_state.service = {
+ "disabled": False,
+ "updated_at": timeutils.utcnow(),
+ "created_at": timeutils.utcnow(),
+ }
+ return host_state
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_performance_check_select_destination(self, mock_get_extra):
+ hosts = 2
+ requests = 1
+
+ self.flags(service_down_time=240)
+
+ request_spec = self._get_fake_request_spec()
+ host_states = []
+ for x in xrange(hosts):
+ host_state = self._get_fake_host_state(x)
+ host_states.append(host_state)
+ self.driver.all_host_states = host_states
+
+ def run_test():
+ a = timeutils.utcnow()
+
+ for x in xrange(requests):
+ self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ b = timeutils.utcnow()
+ c = b - a
+
+ seconds = (c.days * 24 * 60 * 60 + c.seconds)
+ # total elapsed time in milliseconds
+ milliseconds = seconds * 1000 + c.microseconds / 1000.0
+ per_request_ms = milliseconds / requests
+ return per_request_ms
+
+ per_request_ms = None
+ if ENABLE_PROFILER:
+ import pycallgraph
+ from pycallgraph import output
+ config = pycallgraph.Config(max_depth=10)
+ config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
+ 'pycallgraph.*',
+ 'unittest.*',
+ 'nova.tests.unit.*',
+ ])
+ graphviz = output.GraphvizOutput(output_file='scheduler.png')
+
+ with pycallgraph.PyCallGraph(output=graphviz):
+ per_request_ms = run_test()
+
+ else:
+ per_request_ms = run_test()
+
+ # This has proved to be around 1 ms on a random dev box
+ # But this is here so you can easily do simple performance testing.
+ self.assertTrue(per_request_ms < 1000)
+
+
+if __name__ == '__main__':
+ # A handy tool to help profile the scheduler's performance
+ ENABLE_PROFILER = True
+ import unittest
+ suite = unittest.TestSuite()
+ test = "test_performance_check_select_destination"
+ test_case = CachingSchedulerTestCase(test)
+ suite.addTest(test_case)
+ runner = unittest.TextTestRunner()
+ runner.run(suite)
diff --git a/nova/tests/unit/scheduler/test_chance_scheduler.py b/nova/tests/unit/scheduler/test_chance_scheduler.py
new file mode 100644
index 0000000000..73a4696ec3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_chance_scheduler.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Chance Scheduler.
+"""
+
+import random
+
+import mox
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import chance
+from nova.scheduler import driver
+from nova.tests.unit.scheduler import test_scheduler
+
+
+class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Chance Scheduler."""
+
+ driver_cls = chance.ChanceScheduler
+
+ def test_filter_hosts_avoid(self):
+ """Test to make sure _filter_hosts() filters original hosts if
+ avoid_original_host is True.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': ['host2']}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, ['host1', 'host3'])
+
+ def test_filter_hosts_no_avoid(self):
+ """Test to make sure _filter_hosts() does not filter original
+ hosts if avoid_original_host is False.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': []}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, hosts)
+
+ def test_basic_schedule_run_instance(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+
+ def inc_launch_index(*args):
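+ # Mimic instance_update_db bumping launch_index once per instance
+ # scheduled from the same request_spec.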
+ request_spec['instance_properties']['launch_index'] = (
+ request_spec['instance_properties']['launch_index'] + 1)
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ # instance 1
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+ driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance1)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
+ instance=instance1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ # instance 2
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host1')
+ driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance2)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
+ instance=instance2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(ctxt, request_spec,
+ None, None, None, None, {}, False)
+
+ def test_basic_schedule_run_instance_no_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ uuid = 'fake-uuid1'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ request_spec = {'instance_uuids': [uuid],
+ 'instance_properties': instance_opts}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ # instance 1
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
+ old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
+ {'vm_state': vm_states.ERROR,
+ 'task_state': None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(ctxt, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(
+ ctxt, request_spec, None, None, None, None, {}, False)
+
+ def test_select_destinations(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ request_spec = {'num_instances': 2}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host2')
+
+ self.mox.ReplayAll()
+ dests = self.driver.select_destinations(ctxt, request_spec, {})
+ self.assertEqual(2, len(dests))
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual('host3', host)
+ self.assertIsNone(node)
+ (host, node) = (dests[1]['host'], dests[1]['nodename'])
+ self.assertEqual('host2', host)
+ self.assertIsNone(node)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.driver.hosts_up(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn([1, 2])
+ self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
+ self.mox.ReplayAll()
+
+ request_spec = {'num_instances': 1}
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ request_spec, {})
diff --git a/nova/tests/unit/scheduler/test_client.py b/nova/tests/unit/scheduler/test_client.py
new file mode 100644
index 0000000000..5ea915c4f6
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_client.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for Scheduler Client."""
+
+import mock
+
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import exception
+from nova.scheduler import client as scheduler_client
+from nova.scheduler.client import query as scheduler_query_client
+from nova.scheduler.client import report as scheduler_report_client
+from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import test
+"""Tests for Scheduler Client."""
+
+
+class SchedulerReportClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerReportClientTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ self.flags(use_local=True, group='conductor')
+
+ self.client = scheduler_report_client.SchedulerReportClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.conductor_api)
+
+ @mock.patch.object(conductor_api.LocalAPI, 'compute_node_update')
+ def test_update_compute_node_works(self, mock_cn_update):
+ stats = {"id": 1, "foo": "bar"}
+ self.client.update_resource_stats(self.context,
+ ('fakehost', 'fakenode'),
+ stats)
+ mock_cn_update.assert_called_once_with(self.context,
+ {"id": 1},
+ {"foo": "bar"})
+
+ def test_update_compute_node_raises(self):
+ stats = {"foo": "bar"}
+ self.assertRaises(exception.ComputeHostNotCreated,
+ self.client.update_resource_stats,
+ self.context, ('fakehost', 'fakenode'), stats)
+
+
+class SchedulerQueryClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerQueryClientTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ self.client = scheduler_query_client.SchedulerQueryClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.scheduler_rpcapi)
+
+ @mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'select_destinations')
+ def test_select_destinations(self, mock_select_destinations):
+ self.client.select_destinations(
+ context=self.context,
+ request_spec='fake_request_spec',
+ filter_properties='fake_prop'
+ )
+ mock_select_destinations.assert_called_once_with(
+ self.context,
+ 'fake_request_spec',
+ 'fake_prop')
+
+
+class SchedulerClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerClientTestCase, self).setUp()
+ self.client = scheduler_client.SchedulerClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.queryclient)
+ self.assertIsNotNone(self.client.reportclient)
+
+ @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
+ 'select_destinations')
+ def test_select_destinations(self, mock_select_destinations):
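+ # The query client should be created lazily: .instance stays None
+ # until the first call goes through it.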
+ self.assertIsNone(self.client.queryclient.instance)
+
+ self.client.select_destinations('ctxt', 'fake_spec', 'fake_prop')
+
+ self.assertIsNotNone(self.client.queryclient.instance)
+ mock_select_destinations.assert_called_once_with(
+ 'ctxt', 'fake_spec', 'fake_prop')
+
+ @mock.patch.object(scheduler_report_client.SchedulerReportClient,
+ 'update_resource_stats')
+ def test_update_resource_stats(self, mock_update_resource_stats):
+ self.assertIsNone(self.client.reportclient.instance)
+
+ self.client.update_resource_stats('ctxt', 'fake_name', 'fake_stats')
+
+ self.assertIsNotNone(self.client.reportclient.instance)
+ mock_update_resource_stats.assert_called_once_with(
+ 'ctxt', 'fake_name', 'fake_stats')
diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py
new file mode 100644
index 0000000000..96231ef13a
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filter_scheduler.py
@@ -0,0 +1,596 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Filter Scheduler.
+"""
+
+import mock
+import mox
+
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import driver
+from nova.scheduler import filter_scheduler
+from nova.scheduler import host_manager
+from nova.scheduler import utils as scheduler_utils
+from nova.scheduler import weights
+from nova.tests.unit.scheduler import fakes
+from nova.tests.unit.scheduler import test_scheduler
+
+
+def fake_get_filtered_hosts(hosts, filter_properties, index):
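+ # Pass-through stub: pretend every host survives filtering.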
+ return list(hosts)
+
+
+class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Filter Scheduler."""
+
+ driver_cls = filter_scheduler.FilterScheduler
+
+ def test_run_instance_no_hosts(self):
+ sched = fakes.FakeFilterScheduler()
+ uuid = 'fake-uuid1'
+ fake_context = context.RequestContext('user', 'project')
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
+ 'ephemeral_gb': 0},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None,
+ None, None, {}, False)
+
+ def test_run_instance_non_admin(self):
+ self.was_admin = False
+
+ def fake_get(context, *args, **kwargs):
+ # make sure this is called with admin context, even though
+ # we're using user context below
+ self.was_admin = context.is_admin
+ return {}
+
+ sched = fakes.FakeFilterScheduler()
+ self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
+
+ fake_context = context.RequestContext('user', 'project')
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None, None, None, {}, False)
+ self.assertTrue(self.was_admin)
+
+ def test_scheduler_includes_launch_index(self):
+ fake_context = context.RequestContext('user', 'project')
+ instance_opts = {'fake_opt1': 'meow'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+
+ def _has_launch_index(expected_index):
+ """Return a function that verifies the expected index."""
+ def _check_launch_index(value):
+ if 'instance_properties' in value:
+ if 'launch_index' in value['instance_properties']:
+ index = value['instance_properties']['launch_index']
+ if index == expected_index:
+ return True
+ return False
+ return _check_launch_index
+
+ self.mox.StubOutWithMock(self.driver, '_schedule')
+ self.mox.StubOutWithMock(self.driver, '_provision_resource')
+
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.driver._schedule(fake_context, request_spec,
+ expected_filter_properties).AndReturn(['host1', 'host2'])
+ # instance 1
+ self.driver._provision_resource(
+ fake_context, 'host1',
+ mox.Func(_has_launch_index(0)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False).AndReturn(instance1)
+ # instance 2
+ self.driver._provision_resource(
+ fake_context, 'host2',
+ mox.Func(_has_launch_index(1)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid2',
+ legacy_bdm_in_spec=False).AndReturn(instance2)
+ self.mox.ReplayAll()
+
+ self.driver.schedule_run_instance(fake_context, request_spec,
+ None, None, None, None, {}, False)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_happy_day(self, mock_get_extra):
+ """Make sure there's nothing glaringly wrong with _schedule()
+ by doing a happy day pass through.
+ """
+
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}}
+ self.mox.ReplayAll()
+ weighed_hosts = sched._schedule(fake_context, request_spec, {})
+ self.assertEqual(len(weighed_hosts), 10)
+ for weighed_host in weighed_hosts:
+ self.assertIsNotNone(weighed_host.obj)
+
+ def test_max_attempts(self):
+ self.flags(scheduler_max_attempts=4)
+ self.assertEqual(4, scheduler_utils._max_attempts())
+
+ def test_invalid_max_attempts(self):
+ self.flags(scheduler_max_attempts=0)
+ self.assertRaises(exception.NovaException,
+ scheduler_utils._max_attempts)
+
+ def test_retry_disabled(self):
+ # Retry info should not get populated when re-scheduling is off.
+ self.flags(scheduler_max_attempts=1)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_hosts(self):
+ # Retry info should not get populated when force_hosts is set.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_hosts': ['force_host']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_nodes(self):
+ # Retry info should not get populated when force_nodes is set.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_nodes': ['force_node']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_attempt_one(self):
+ # Test retry logic on initial scheduling attempt.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_attempt_two(self):
+ # Test retry logic when re-scheduling.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 1}}
+ expected_filter_properties = {'retry': {'num_attempts': 2}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_exceeded_max_attempts(self):
+ # Test that NoValidHost is raised when max retries is exceeded and
+ # that the information needed in request_spec is still present for
+ # error handling
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 2}}
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
+ self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_add_retry_host(self):
+ retry = dict(num_attempts=1, hosts=[])
+ filter_properties = dict(retry=retry)
+ host = "fakehost"
+ node = "fakenode"
+
+ scheduler_utils._add_retry_host(filter_properties, host, node)
+
+ hosts = filter_properties['retry']['hosts']
+ self.assertEqual(1, len(hosts))
+ self.assertEqual([host, node], hosts[0])
+
+ def test_post_select_populate(self):
+ # Test addition of certain filter props after a node is selected.
+ retry = {'hosts': [], 'num_attempts': 1}
+ filter_properties = {'retry': retry}
+
+ host_state = host_manager.HostState('host', 'node')
+ host_state.limits['vcpus'] = 5
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
+ self.assertEqual(['host', 'node'],
+ filter_properties['retry']['hosts'][0])
+
+ self.assertEqual({'vcpus': 5}, host_state.limits)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_host_pool(self, mock_get_extra):
+ """Make sure the scheduler_host_subset_size property works properly."""
+
+ self.flags(scheduler_host_subset_size=2)
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_large_host_pool(self, mock_get_extra):
+ """Hosts should still be chosen if pool size
+ is larger than number of filtered hosts.
+ """
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.flags(scheduler_host_subset_size=20)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_chooses_best_host(self, mock_get_extra):
+ """If scheduler_host_subset_size is 1, the largest host with greatest
+ weight should be returned.
+ """
+
+ self.flags(scheduler_host_subset_size=1)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.next_weight = 50
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ this_weight = self.next_weight
+ self.next_weight = 0
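+ # Only the first weighed host gets weight 50; later calls return 0,
+ # so the first host must win.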
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, this_weight)]
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(1, len(hosts))
+
+ self.assertEqual(50, hosts[0].weight)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destinations(self, mock_get_extra):
+ """select_destinations is basically a wrapper around _schedule().
+
+ Similar to the _schedule tests, this just does a happy path test to
+ ensure there is nothing glaringly wrong.
+ """
+
+ self.next_weight = 1.0
+
+ selected_hosts = []
+ selected_nodes = []
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ selected_hosts.append(host_state.host)
+ selected_nodes.append(host_state.nodename)
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'},
+ 'num_instances': 1}
+ self.mox.ReplayAll()
+ dests = sched.select_destinations(fake_context, request_spec, {})
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual(host, selected_hosts[0])
+ self.assertEqual(node, selected_nodes[0])
+
+ @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
+ def test_select_destinations_notifications(self, mock_schedule):
+ mock_schedule.return_value = [mock.Mock()]
+
+ with mock.patch.object(self.driver.notifier, 'info') as mock_info:
+ request_spec = {'num_instances': 1}
+
+ self.driver.select_destinations(self.context, request_spec, {})
+
+ expected = [
+ mock.call(self.context, 'scheduler.select_destinations.start',
+ dict(request_spec=request_spec)),
+ mock.call(self.context, 'scheduler.select_destinations.end',
+ dict(request_spec=request_spec))]
+ self.assertEqual(expected, mock_info.call_args_list)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.stubs.Set(self.driver, '_schedule', _return_no_host)
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ {'num_instances': 1}, {})
+
+ def test_select_destinations_no_valid_host_not_enough(self):
+ # Tests the case where fewer hosts are available than the number
+ # of instances requested to build.
+ with mock.patch.object(self.driver, '_schedule',
+ return_value=[mock.sentinel.host1]):
+ try:
+ self.driver.select_destinations(
+ self.context, {'num_instances': 2}, {})
+ self.fail('Expected NoValidHost to be raised.')
+ except exception.NoValidHost as e:
+ # Make sure that we provided a reason why NoValidHost.
+ self.assertIn('reason', e.kwargs)
+ self.assertTrue(len(e.kwargs['reason']) > 0)
+
+ def test_handles_deleted_instance(self):
+ """Test instance deletion while being scheduled."""
+
+ def _raise_instance_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='123')
+
+ self.stubs.Set(driver, 'instance_update_db',
+ _raise_instance_not_found)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project')
+ host_state = host_manager.HostState('host2', 'node2')
+ weighted_host = weights.WeighedHost(host_state, 1.42)
+ filter_properties = {}
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ sched._provision_resource(fake_context, weighted_host,
+ request_spec, filter_properties,
+ None, None, None, None)
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
new file mode 100644
index 0000000000..6469829078
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -0,0 +1,206 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import inspect
+import sys
+
+from nova import filters
+from nova import loadables
+from nova import test
+
+
+class Filter1(filters.BaseFilter):
+ """Test Filter class #1."""
+ pass
+
+
+class Filter2(filters.BaseFilter):
+ """Test Filter class #2."""
+ pass
+
+
+class FiltersTestCase(test.NoDBTestCase):
+ def test_filter_all(self):
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ result = base_filter.filter_all(filter_obj_list, filter_properties)
+ self.assertTrue(inspect.isgenerator(result))
+ self.assertEqual(['obj1', 'obj3'], list(result))
+
+ def test_filter_all_recursive_yields(self):
+ # Test filter_all() allows generators from previous filter_all()s.
+ # filter_all() yields results. We want to make sure that we can
+ # call filter_all() with generators returned from previous calls
+ # to filter_all().
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ total_iterations = 200
+
+ # The order in which _filter_one gets called is confusing because
+ # we will be recursively yielding things.
+ # We are going to simulate the first call to filter_all()
+ # returning False for 'obj2'. So, 'obj1' will get yielded
+ # 'total_iterations' number of times before the first filter_all()
+ # call gets to processing 'obj2'. We then return 'False' for it.
+ # After that, 'obj3' gets yielded 'total_iterations' number of
+ # times.
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+ self.mox.ReplayAll()
+
+ objs = iter(filter_obj_list)
+ for x in xrange(total_iterations):
+ # Pass in generators returned from previous calls.
+ objs = base_filter.filter_all(objs, filter_properties)
+ self.assertTrue(inspect.isgenerator(objs))
+ self.assertEqual(['obj1', 'obj3'], list(objs))
+
+ def test_get_filtered_objects(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_objs_last = ['last', 'filter3', 'objects3']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+ filt2_mock.run_filter_for_index(0).AndReturn(True)
+ filt2_mock.filter_all(filter_objs_second,
+ filter_properties).AndReturn(filter_objs_last)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertEqual(filter_objs_last, result)
+
+ def test_get_filtered_objects_for_index(self):
+ """Test that we don't call a filter when its
+ run_filter_for_index() method returns False.
+ """
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+ # return false so filter_all will not be called
+ filt2_mock.run_filter_for_index(0).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+
+ def test_get_filtered_objects_none_response(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ # Shouldn't be called.
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(None)
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertIsNone(result)
diff --git a/nova/tests/unit/scheduler/test_filters_utils.py b/nova/tests/unit/scheduler/test_filters_utils.py
new file mode 100644
index 0000000000..48792fae35
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filters_utils.py
@@ -0,0 +1,44 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import utils
+from nova import test
+
+
+class UtilsTestCase(test.NoDBTestCase):
+ def test_validate_num_values(self):
+ f = utils.validate_num_values
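+ # validate_num_values reduces a set to a single value: an empty set
+ # falls back to the default, a single value is cast, and multi-value
+ # sets are reduced with the 'based_on' callable.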
+
+ self.assertEqual("x", f(set(), default="x"))
+ self.assertEqual(1, f(set(["1"]), cast_to=int))
+ self.assertEqual(1.0, f(set(["1"]), cast_to=float))
+ self.assertEqual(1, f(set([1, 2]), based_on=min))
+ self.assertEqual(2, f(set([1, 2]), based_on=max))
+
+ @mock.patch("nova.objects.aggregate.AggregateList.get_by_host")
+ def test_aggregate_values_from_db(self, get_by_host):
+ aggrA = mock.MagicMock()
+ aggrB = mock.MagicMock()
+ context = mock.MagicMock()
+
+ get_by_host.return_value = [aggrA, aggrB]
+ aggrA.metadata = {'k1': 1, 'k2': 2}
+ aggrB.metadata = {'k1': 3, 'k2': 4}
+
+ values = utils.aggregate_values_from_db(context, 'h1', key_name='k1')
+
+ self.assertTrue(context.elevated.called)
+ self.assertEqual(set([1, 3]), values)
diff --git a/nova/tests/unit/scheduler/test_host_filters.py b/nova/tests/unit/scheduler/test_host_filters.py
new file mode 100644
index 0000000000..caed938aa3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_filters.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+from nova.scheduler import filters
+from nova.scheduler.filters import all_hosts_filter
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HostFiltersTestCase(test.NoDBTestCase):
+
+ def test_filter_handler(self):
+ # Double check at least a couple of known filters exist
+ filter_handler = filters.HostFilterHandler()
+ classes = filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.all_filters'])
+ self.assertIn(all_hosts_filter.AllHostsFilter, classes)
+ self.assertIn(compute_filter.ComputeFilter, classes)
+
+ def test_all_host_filter(self):
+ filt_cls = all_hosts_filter.AllHostsFilter()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(filt_cls.host_passes(host, {}))
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
new file mode 100644
index 0000000000..b891baf7b4
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -0,0 +1,545 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For HostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova.i18n import _LW
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+from nova.virt import hardware
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class HostManagerTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
+ 'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [host_manager.HostState('fake_multihost',
+ 'fake-node%s' % x) for x in xrange(1, 5)]
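+ # Indexes 0-3 are fake_host1..4 (each on 'fake-node'); indexes 4-7
+ # are fake-node1..4, all on 'fake_multihost'.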
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+ # Test that we return the one matching filter class
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_classes), 1)
+ self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+ # Ensure ignore_hosts processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+ # Ensure all nodes returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+ # Ensure only overlapping results if both force host and node
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_nodes and force_hosts yield no results
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_all_host_states(self):
+
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ self.mox.StubOutWithMock(host_manager.LOG, 'warn')
+
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+        # node3's physical disk space is greater than what the database expected
+ host_manager.LOG.warn(_LW("Host %(hostname)s has more disk space than "
+ "database expected (%(physical)sgb > "
+ "%(database)sgb)"),
+ {'physical': 3333, 'database': 3072,
+ 'hostname': 'node3'})
+ # Invalid service
+ host_manager.LOG.warn(_LW("No service for compute ID %s"), 5)
+
+ self.mox.ReplayAll()
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ # Check that .service is set properly
+        for i in range(4):
+ compute_node = fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
+ compute_node['service'])
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
+ # 511GB
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
+ # 1023GB
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
+ # 3071GB
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertThat(
+ hardware.VirtNUMAHostTopology.from_json(
+ host_states_map[('host3', 'node3')].numa_topology
+ )._to_dict(),
+ matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
+ # 8191GB
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
+
+
+class HostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [
+ host_manager.HostState('host1', 'node1'),
+ host_manager.HostState('host2', 'node2'),
+ host_manager.HostState('host3', 'node3'),
+ host_manager.HostState('host4', 'node4')
+ ]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 3)
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 0)
+
+
+class HostStateTestCase(test.NoDBTestCase):
+ """Test case for HostState class."""
+
+ # update_from_compute_node() and consume_from_instance() are tested
+ # in HostManagerTestCase.test_get_all_host_states()
+
+ def test_stat_consumption_from_compute_node(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
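+        # Stats arrive from the compute node as a JSON blob of strings, so
+        # update_from_compute_node must deserialize and coerce the values.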
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_type='htype',
+ hypervisor_hostname='hostname', cpu_info='cpu_info',
+ supported_instances='{}',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertEqual('127.0.0.1', host.host_ip)
+ self.assertEqual('htype', host.hypervisor_type)
+ self.assertEqual('hostname', host.hypervisor_hostname)
+ self.assertEqual('cpu_info', host.cpu_info)
+ self.assertEqual({}, host.supported_instances)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_non_pci(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_rescue_unshelving(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.UNSHELVING: '1',
+ 'num_task_%s' % task_states.RESCUING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
+ def test_stat_consumption_from_instance(self, numa_usage_mock):
+ numa_usage_mock.return_value = 'fake-consumed-once'
+ host = host_manager.HostState("fakehost", "fakenode")
+
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.BUILDING,
+ task_state=task_states.SCHEDULING, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+ numa_usage_mock.assert_called_once_with(host, instance)
+ self.assertEqual('fake-consumed-once', host.numa_topology)
+
+ numa_usage_mock.return_value = 'fake-consumed-twice'
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.PAUSED,
+ task_state=None, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+
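+        # Only the first instance (BUILDING/SCHEDULING) counts as an I/O
+        # op; the paused one does not.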
+ self.assertEqual(2, host.num_instances)
+ self.assertEqual(1, host.num_io_ops)
+ self.assertEqual(2, numa_usage_mock.call_count)
+ self.assertEqual(((host, instance),), numa_usage_mock.call_args)
+ self.assertEqual('fake-consumed-twice', host.numa_topology)
+
+ def test_resources_consumption_from_compute_node(self):
+ metrics = [
+ dict(name='res1',
+ value=1.0,
+ source='source1',
+ timestamp=None),
+ dict(name='res2',
+ value="string2",
+ source='source2',
+ timestamp=None),
+ ]
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(metrics=jsonutils.dumps(metrics),
+ memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int,
+ numa_topology=fakes.NUMA_TOPOLOGY.to_json())
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(len(host.metrics), 2)
+ self.assertEqual(set(['res1', 'res2']), set(host.metrics.keys()))
+ self.assertEqual(1.0, host.metrics['res1'].value)
+ self.assertEqual('source1', host.metrics['res1'].source)
+ self.assertEqual('string2', host.metrics['res2'].value)
+ self.assertEqual('source2', host.metrics['res2'].source)
+ self.assertIsInstance(host.numa_topology, six.string_types)
diff --git a/nova/tests/unit/scheduler/test_ironic_host_manager.py b/nova/tests/unit/scheduler/test_ironic_host_manager.py
new file mode 100644
index 0000000000..50ec038cb3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_ironic_host_manager.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2014 OpenStack Foundation
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For IronicHostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import db
+from nova import exception
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova.scheduler import ironic_host_manager
+from nova import test
+from nova.tests.unit.scheduler import ironic_fakes
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class IronicHostManagerTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+
+ def test_manager_public_api_signatures(self):
+ self.assertPublicAPISignatures(host_manager.HostManager(),
+ self.host_manager)
+
+ def test_state_public_api_signatures(self):
+ self.assertPublicAPISignatures(
+ host_manager.HostState("dummy",
+ "dummy"),
+ ironic_host_manager.IronicNodeState("dummy",
+ "dummy")
+ )
+
+ def test_get_all_host_states(self):
+        # Ensure .service is set and that we get the values we expect.
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ for i in range(4):
+ compute_node = ironic_fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(compute_node['service'],
+ host_states_map[state_key].service)
+ self.assertEqual(jsonutils.loads(compute_node['stats']),
+ host_states_map[state_key].stats)
+ self.assertEqual(compute_node['free_ram_mb'],
+ host_states_map[state_key].free_ram_mb)
+ self.assertEqual(compute_node['free_disk_gb'] * 1024,
+ host_states_map[state_key].free_disk_mb)
+
+
+class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ ironic_driver = "nova.virt.ironic.driver.IronicDriver"
+ supported_instances = '[["i386", "baremetal", "baremetal"]]'
+ self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(
+ ironic_driver=ironic_driver,
+ cpu_arch='i386')),
+ supported_instances=supported_instances,
+ free_disk_gb=10, free_ram_mb=1024,
+ hypervisor_type='ironic',
+                           hypervisor_version=1,
+                           hypervisor_hostname='fake_host')
+
+ @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
+ def test_create_ironic_node_state(self, init_mock):
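+        # host_state_cls should pick IronicNodeState when the compute
+        # record looks like an ironic node (here keyed off its cpu_info).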
+ init_mock.return_value = None
+ compute = {'cpu_info': 'baremetal cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
+
+ @mock.patch.object(host_manager.HostState, '__init__')
+ def test_create_non_ironic_host_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'other cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(host_manager.HostState, type(host_state))
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4uuid']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(3, len(host_states_map))
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(0, len(host_states_map))
+
+ def test_update_from_compute_node(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ self.assertEqual(1024, host.free_ram_mb)
+ self.assertEqual(1024, host.total_usable_ram_mb)
+ self.assertEqual(10240, host.free_disk_mb)
+ self.assertEqual(1, host.vcpus_total)
+ self.assertEqual(0, host.vcpus_used)
+ self.assertEqual(jsonutils.loads(self.compute_node['stats']),
+ host.stats)
+ self.assertEqual('ironic', host.hypervisor_type)
+ self.assertEqual(1, host.hypervisor_version)
+ self.assertEqual('fake_host', host.hypervisor_hostname)
+
+ def test_consume_identical_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_larger_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
+ host.consume_from_instance(instance)
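+        # An oversized request still consumes exactly one whole node:
+        # free resources floor at zero and vcpus_used is capped at the
+        # node's total.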
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_smaller_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
+ host.consume_from_instance(instance)
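+        # Bare metal nodes are allocated whole, so even a smaller
+        # instance exhausts the node's resources.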
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+
+class IronicHostManagerTestFilters(test.NoDBTestCase):
+ """Test filters work for IronicHostManager."""
+
+ def setUp(self):
+ super(IronicHostManagerTestFilters, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [ironic_host_manager.IronicNodeState(
+ 'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
+ self.fake_hosts += [ironic_host_manager.IronicNodeState(
+ 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+        # Test that we return the one configured filter class
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(1, len(filter_classes))
+ self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
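+        # Narrow filter selection down to FakeFilterClass1 and record each
+        # (host, filter_properties) pair it sees; the fake filter passes
+        # every host.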
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+        # Ensure ignore_hosts is processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+        # Ensure all nodes are returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+        # Ensure only overlapping hosts/nodes are returned when both force_hosts and force_nodes are set
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_nodes and force_hosts yield no results
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
diff --git a/nova/tests/unit/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
new file mode 100644
index 0000000000..0ba0feb540
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.scheduler.rpcapi
+"""
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import test
+
+CONF = cfg.CONF
+
+
+class SchedulerRpcAPITestCase(test.NoDBTestCase):
+ def _test_scheduler_api(self, method, rpc_method, **kwargs):
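+        # Generic harness: mock the RPC client, expect the appropriate
+        # prepare()/call-or-cast interaction for the given method, then
+        # invoke the rpcapi method and verify the round trip.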
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = scheduler_rpcapi.SchedulerAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.scheduler_topic)
+
+ expected_retval = 'foo' if rpc_method == 'call' else None
+ expected_version = kwargs.pop('version', None)
+ expected_fanout = kwargs.pop('fanout', None)
+ expected_kwargs = kwargs.copy()
+
+ self.mox.StubOutWithMock(rpcapi, 'client')
+
+ rpcapi.client.can_send_version(
+ mox.IsA(str)).MultipleTimes().AndReturn(True)
+
+ prepare_kwargs = {}
+ if expected_fanout:
+ prepare_kwargs['fanout'] = True
+ if expected_version:
+ prepare_kwargs['version'] = expected_version
+ rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
+
+ rpc_method = getattr(rpcapi.client, rpc_method)
+
+ rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
+
+ self.mox.ReplayAll()
+
+ # NOTE(markmc): MultipleTimes() is OnceOrMore() not ZeroOrMore()
+ rpcapi.client.can_send_version('I fool you mox')
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, expected_retval)
+
+ def test_select_destinations(self):
+ self._test_scheduler_api('select_destinations', rpc_method='call',
+ request_spec='fake_request_spec',
+ filter_properties='fake_prop')
diff --git a/nova/tests/unit/scheduler/test_scheduler.py b/nova/tests/unit/scheduler/test_scheduler.py
new file mode 100644
index 0000000000..2435d60343
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler.py
@@ -0,0 +1,378 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler
+"""
+
+import mox
+from oslo.config import cfg
+
+from nova.compute import api as compute_api
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova import rpc
+from nova.scheduler import driver
+from nova.scheduler import manager
+from nova import servicegroup
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+class SchedulerManagerTestCase(test.NoDBTestCase):
+ """Test case for scheduler manager."""
+
+ manager_cls = manager.SchedulerManager
+ driver_cls = driver.Scheduler
+ driver_cls_name = 'nova.scheduler.driver.Scheduler'
+
+ def setUp(self):
+ super(SchedulerManagerTestCase, self).setUp()
+ self.flags(scheduler_driver=self.driver_cls_name)
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+ self.manager = self.manager_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.fake_args = (1, 2, 3)
+ self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def test_1_correct_init(self):
+ # Correct scheduler driver
+ manager = self.manager
+ self.assertIsInstance(manager.driver, self.driver_cls)
+
+ def _mox_schedule_method_helper(self, method_name):
+        # Install a placeholder so the method we are about to stub out
+        # and call actually exists on the driver.
+ def stub_method(*args, **kwargs):
+ pass
+
+ setattr(self.manager.driver, method_name, stub_method)
+
+ self.mox.StubOutWithMock(self.manager.driver,
+ method_name)
+
+ def test_run_instance_exception_puts_instance_in_error_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ inst = {"vm_state": "", "task_state": ""}
+
+ self._mox_schedule_method_helper('schedule_run_instance')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_properties': inst,
+ 'instance_uuids': [fake_instance_uuid]}
+
+ self.manager.driver.schedule_run_instance(self.context,
+ request_spec, None, None, None, None, {}, False).AndRaise(
+ exception.NoValidHost(reason=""))
+ old, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ERROR,
+ "task_state": None}).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context,
+ new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.run_instance(self.context, request_spec,
+ None, None, None, None, {}, False)
+
+ def test_prep_resize_no_valid_host_back_in_active_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
+ inst = {"vm_state": "", "task_state": ""}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_type': 'fake_type',
+ 'instance_uuids': [fake_instance_uuid],
+ 'instance_properties': {'uuid': fake_instance_uuid}}
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ exception.NoValidHost(reason=""))
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
+ (inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(**kwargs)
+
+ def test_prep_resize_no_valid_host_back_in_shutoff_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid, "vm_state": "stopped"}
+ inst = {"vm_state": "stopped", "task_state": ""}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_type': 'fake_type',
+ 'instance_uuids': [fake_instance_uuid],
+ 'instance_properties': {'uuid': fake_instance_uuid}}
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ exception.NoValidHost(reason=""))
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.STOPPED, "task_state": None}).AndReturn(
+ (inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(**kwargs)
+
+ def test_prep_resize_exception_host_in_error_state_and_raise(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {
+ 'instance_properties': {'uuid': fake_instance_uuid},
+ 'instance_uuids': [fake_instance_uuid]
+ }
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ test.TestingException('something happened'))
+
+ inst = {
+ "vm_state": "",
+ "task_state": "",
+ }
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ERROR,
+ "task_state": None}).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(test.TestingException), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException, self.manager.prep_resize,
+ **kwargs)
+
+ def test_set_vm_state_and_notify_adds_instance_fault(self):
+ request = {'instance_properties': {'uuid': 'fake-uuid'}}
+ updates = {'vm_state': 'foo'}
+ fake_inst = {'uuid': 'fake-uuid'}
+
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier('scheduler').AndReturn(notifier)
+ db.instance_update_and_get_original(self.context, 'fake-uuid',
+ updates).AndReturn((None,
+ fake_inst))
+ db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ notifier.error(self.context, 'scheduler.foo', mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
+ self.context, None, request)
+
+ def test_prep_resize_post_populates_retry(self):
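+        # A successful pass should record the chosen (host, node) in
+        # filter_properties['retry']['hosts'] so a reschedule can avoid it.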
+ self.manager.driver = fakes.FakeFilterScheduler()
+
+ image = 'image'
+ instance_uuid = 'fake-instance-id'
+ instance = fake_instance.fake_db_instance(uuid=instance_uuid)
+
+ instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
+ instance_type = "m1.tiny"
+ request_spec = {'instance_properties': instance_properties,
+ 'instance_type': instance_type,
+ 'instance_uuids': [instance_uuid]}
+ retry = {'hosts': [], 'num_attempts': 1}
+ filter_properties = {'retry': retry}
+ reservations = None
+
+ hosts = [dict(host='host', nodename='node', limits={})]
+
+ self._mox_schedule_method_helper('select_destinations')
+ self.manager.driver.select_destinations(
+ self.context, request_spec, filter_properties).AndReturn(hosts)
+
+ self.mox.StubOutWithMock(self.manager.compute_rpcapi, 'prep_resize')
+ self.manager.compute_rpcapi.prep_resize(self.context, image,
+ mox.IsA(objects.Instance),
+ instance_type, 'host', reservations, request_spec=request_spec,
+ filter_properties=filter_properties, node='node')
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(self.context, image, request_spec,
+ filter_properties, instance, instance_type, reservations)
+
+ self.assertEqual([['host', 'node']],
+ filter_properties['retry']['hosts'])
+
+
+class SchedulerTestCase(test.NoDBTestCase):
+ """Test case for base scheduler driver class."""
+
+ # So we can subclass this test and re-use tests if we need.
+ driver_cls = driver.Scheduler
+
+ def setUp(self):
+ super(SchedulerTestCase, self).setUp()
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+
+ def fake_show(meh, context, id, **kwargs):
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.image_service = glance.get_default_image_service()
+
+ self.driver = self.driver_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.servicegroup_api = servicegroup.API()
+
+ def test_hosts_up(self):
+ service1 = {'host': 'host1'}
+ service2 = {'host': 'host2'}
+ services = [service1, service2]
+
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+
+ db.service_get_all_by_topic(self.context,
+ self.topic).AndReturn(services)
+ self.servicegroup_api.service_is_up(service1).AndReturn(False)
+ self.servicegroup_api.service_is_up(service2).AndReturn(True)
+
+ self.mox.ReplayAll()
+ result = self.driver.hosts_up(self.context, self.topic)
+ self.assertEqual(result, ['host2'])
+
+ def test_handle_schedule_error_adds_instance_fault(self):
+ instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ mox.IgnoreArg()).AndReturn(
+ (None, instance))
+ db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier('scheduler').AndReturn(notifier)
+ notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver.handle_schedule_error(self.context,
+ exception.NoValidHost('test'),
+ instance['uuid'], {})
+
+
+class SchedulerDriverBaseTestCase(SchedulerTestCase):
+ """Test cases for base scheduler driver class methods
+ that will fail if the driver is changed.
+ """
+
+ def test_unimplemented_schedule_run_instance(self):
+ fake_request_spec = {'instance_properties':
+ {'uuid': 'uuid'}}
+
+ self.assertRaises(NotImplementedError,
+ self.driver.schedule_run_instance,
+ self.context, fake_request_spec, None, None, None,
+ None, None, False)
+
+ def test_unimplemented_select_destinations(self):
+ self.assertRaises(NotImplementedError,
+ self.driver.select_destinations, self.context, {}, {})
+
+
+class SchedulerInstanceGroupData(test.TestCase):
+
+ driver_cls = driver.Scheduler
+
+ def setUp(self):
+ super(SchedulerInstanceGroupData, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.driver = self.driver_cls()
+
+ def _get_default_values(self):
+ return {'name': 'fake_name',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+
+ def _create_instance_group(self, context, values, policies=None,
+ metadata=None, members=None):
+ return db.instance_group_create(context, values, policies=policies,
+ metadata=metadata, members=members)
diff --git a/nova/tests/unit/scheduler/test_scheduler_options.py b/nova/tests/unit/scheduler/test_scheduler_options.py
new file mode 100644
index 0000000000..29d42ccd2f
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler_options.py
@@ -0,0 +1,138 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For SchedulerOptions.
+"""
+
+import datetime
+import StringIO
+
+from oslo.serialization import jsonutils
+
+from nova.scheduler import scheduler_options
+from nova import test
+
+
+class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
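+    # Freezes the clock and the config file's timestamp/contents so the
+    # caching and reload logic can be driven deterministically.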
+ def __init__(self, last_checked, now, file_old, file_now, data, filedata):
+ super(FakeSchedulerOptions, self).__init__()
+ # Change internals ...
+ self.last_modified = file_old
+ self.last_checked = last_checked
+ self.data = data
+
+ # For overrides ...
+ self._time_now = now
+ self._file_now = file_now
+ self._file_data = filedata
+
+ self.file_was_loaded = False
+
+ def _get_file_timestamp(self, filename):
+ return self._file_now
+
+ def _get_file_handle(self, filename):
+ self.file_was_loaded = True
+ return StringIO.StringIO(self._file_data)
+
+ def _get_time_now(self):
+ return self._time_now
+
+
+class SchedulerOptionsTestCase(test.NoDBTestCase):
+ def test_get_configuration_first_time_no_flag(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual({}, fake.get_configuration())
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_empty_file(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ jdata = ""
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual({}, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_happy_day(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_no_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ data, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_too_fast(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2011, 1, 1, 1, 1, 2)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEqual(old_data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
new file mode 100644
index 0000000000..0dfade7deb
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Utils
+"""
+import contextlib
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import db
+from nova import exception
+from nova import notifications
+from nova import objects
+from nova import rpc
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+
+
+class SchedulerUtilsTestCase(test.NoDBTestCase):
+ """Test case for scheduler utils methods."""
+ def setUp(self):
+ super(SchedulerUtilsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ def test_build_request_spec_without_image(self):
+ image = None
+ instance = {'uuid': 'fake-uuid'}
+ instance_type = {'flavorid': 'fake-id'}
+
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
+ flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
+ db.flavor_extra_specs_get(self.context, mox.IgnoreArg()).AndReturn([])
+ self.mox.ReplayAll()
+
+ request_spec = scheduler_utils.build_request_spec(self.context, image,
+ [instance])
+ self.assertEqual({}, request_spec['image'])
+
+ @mock.patch.object(flavors, 'extract_flavor')
+ @mock.patch.object(db, 'flavor_extra_specs_get')
+ def test_build_request_spec_with_object(self, flavor_extra_specs_get,
+ extract_flavor):
+ instance_type = {'flavorid': 'fake-id'}
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ extract_flavor.return_value = instance_type
+ flavor_extra_specs_get.return_value = []
+
+ request_spec = scheduler_utils.build_request_spec(self.context, None,
+ [instance])
+ self.assertIsInstance(request_spec['instance_properties'], dict)
+
+ def _test_set_vm_state_and_notify(self, request_spec,
+ expected_uuids):
+ updates = dict(vm_state='fake-vm-state')
+ service = 'fake-service'
+ method = 'fake-method'
+ exc_info = 'exc_info'
+
+ self.mox.StubOutWithMock(compute_utils,
+ 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier(service).AndReturn(notifier)
+
+ old_ref = 'old_ref'
+ new_ref = 'new_ref'
+
+ for _uuid in expected_uuids:
+ db.instance_update_and_get_original(
+ self.context, _uuid, updates).AndReturn((old_ref, new_ref))
+ notifications.send_update(self.context, old_ref, new_ref,
+ service=service)
+ compute_utils.add_instance_fault_from_exc(
+ self.context,
+ new_ref, exc_info, mox.IsA(tuple))
+
+ payload = dict(request_spec=request_spec,
+ instance_properties=request_spec.get(
+ 'instance_properties', {}),
+ instance_id=_uuid,
+ state='fake-vm-state',
+ method=method,
+ reason=exc_info)
+ event_type = '%s.%s' % (service, method)
+ notifier.error(self.context, event_type, payload)
+
+ self.mox.ReplayAll()
+
+ scheduler_utils.set_vm_state_and_notify(self.context,
+ service,
+ method,
+ updates,
+ exc_info,
+ request_spec,
+ db)
+
+ def test_set_vm_state_and_notify_rs_uuids(self):
+ expected_uuids = ['1', '2', '3']
+ request_spec = dict(instance_uuids=expected_uuids)
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def test_set_vm_state_and_notify_uuid_from_instance_props(self):
+ expected_uuids = ['fake-uuid']
+ request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def _test_populate_filter_props(self, host_state_obj=True,
+ with_retry=True,
+ force_hosts=None,
+ force_nodes=None):
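+        # Exercise populate_filter_properties across combinations of
+        # retry, force_hosts and force_nodes; forcing a host clears
+        # limits, and forcing either skips retry tracking.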
+ if force_hosts is None:
+ force_hosts = []
+ if force_nodes is None:
+ force_nodes = []
+ if with_retry:
+ if not force_hosts and not force_nodes:
+ filter_properties = dict(retry=dict(hosts=[]))
+ else:
+ filter_properties = dict(force_hosts=force_hosts,
+ force_nodes=force_nodes)
+ else:
+ filter_properties = dict()
+
+ if host_state_obj:
+ class host_state(object):
+ host = 'fake-host'
+ nodename = 'fake-node'
+ limits = 'fake-limits'
+ else:
+ host_state = dict(host='fake-host',
+ nodename='fake-node',
+ limits='fake-limits')
+
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+ if with_retry and not force_hosts and not force_nodes:
+            # Call a second time so we can verify two hosts accumulate in retry
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
+ if force_hosts:
+ expected_limits = None
+ else:
+ expected_limits = 'fake-limits'
+ self.assertEqual(expected_limits,
+ filter_properties.get('limits'))
+
+ if with_retry and not force_hosts and not force_nodes:
+ self.assertEqual([['fake-host', 'fake-node'],
+ ['fake-host', 'fake-node']],
+ filter_properties['retry']['hosts'])
+ else:
+ self.assertNotIn('retry', filter_properties)
+
+ def test_populate_filter_props(self):
+ self._test_populate_filter_props()
+
+ def test_populate_filter_props_host_dict(self):
+ self._test_populate_filter_props(host_state_obj=False)
+
+ def test_populate_filter_props_no_retry(self):
+ self._test_populate_filter_props(with_retry=False)
+
+ def test_populate_filter_props_force_hosts_no_retry(self):
+ self._test_populate_filter_props(force_hosts=['force-host'])
+
+ def test_populate_filter_props_force_nodes_no_retry(self):
+ self._test_populate_filter_props(force_nodes=['force-node'])
+
+ @mock.patch.object(scheduler_utils, '_max_attempts')
+ def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
+ _max_attempts.return_value = 2
+ msg = 'The exception text was preserved!'
+ filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
+ exc=[msg]))
+ nvh = self.assertRaises(exception.NoValidHost,
+ scheduler_utils.populate_retry,
+ filter_properties, 'fake-uuid')
+ # make sure 'msg' is a substring of the complete exception text
+ self.assertIn(msg, nvh.message)
+
+ def _check_parse_options(self, opts, sep, converter, expected):
+ good = scheduler_utils.parse_options(opts,
+ sep=sep,
+ converter=converter)
+ for item in expected:
+ self.assertIn(item, good)
+
+ def test_parse_options(self):
+ # check normal
+ self._check_parse_options(['foo=1', 'bar=-2.1'],
+ '=',
+ float,
+ [('foo', 1.0), ('bar', -2.1)])
+ # check convert error
+ self._check_parse_options(['foo=a1', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check separator missing
+ self._check_parse_options(['foo', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check key missing
+ self._check_parse_options(['=5', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+
+ def test_validate_filters_configured(self):
+ self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
+ self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
+
+ def _create_server_group(self, policy='anti-affinity'):
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.name = 'pele'
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+ return group
+
+ def _group_details_in_filter_properties(self, group, func='get_by_uuid',
+ hint=None, policy=None):
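+        # setup_instance_group should union the group's recorded hosts
+        # with the hosts passed in and surface the group's policy.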
+ group_hint = hint
+ group_hosts = ['hostB']
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, func, return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ group_info = scheduler_utils.setup_instance_group(
+ self.context, group_hint, group_hosts)
+ self.assertEqual(
+ (set(['hostA', 'hostB']), [policy]),
+ group_info)
+
+ def test_group_details_in_filter_properties(self):
+ for policy in ['affinity', 'anti-affinity']:
+ group = self._create_server_group(policy)
+ self._group_details_in_filter_properties(group, func='get_by_uuid',
+ hint=group.uuid,
+ policy=policy)
+
+ def _group_filter_with_filter_not_configured(self, policy):
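+        # Without the matching (anti-)affinity filter enabled in
+        # scheduler_default_filters, scheduling into a group with that
+        # policy must fail with NoValidHost.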
+ self.flags(scheduler_default_filters=['f1', 'f2'])
+
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
+ return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ self.assertRaises(exception.NoValidHost,
+ scheduler_utils.setup_instance_group,
+ self.context, group.uuid)
+
+ def test_group_filter_with_filter_not_configured(self):
+ policies = ['anti-affinity', 'affinity']
+ for policy in policies:
+ self._group_filter_with_filter_not_configured(policy)
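+
+    # Only 'f1' and 'f2' are configured above, so neither
+    # ServerGroupAffinityFilter nor ServerGroupAntiAffinityFilter can enforce
+    # the requested policy; setup_instance_group() is expected to fail loudly
+    # with NoValidHost instead of silently ignoring the policy.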
+
+ def test_group_uuid_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_uuid',
+ group.uuid, 'anti-affinity')
+
+ def test_group_name_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_name',
+ group.name, 'anti-affinity')
diff --git a/nova/tests/unit/scheduler/test_weights.py b/nova/tests/unit/scheduler/test_weights.py
new file mode 100644
index 0000000000..5f168bf5df
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_weights.py
@@ -0,0 +1,338 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for scheduler weights.
+"""
+
+from oslo.serialization import jsonutils
+
+from nova import context
+from nova import exception
+from nova.openstack.common.fixture import mockpatch
+from nova.scheduler import weights
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+
+
+class TestWeighedHost(test.NoDBTestCase):
+ def test_dict_conversion(self):
+ host_state = fakes.FakeHostState('somehost', None, {})
+ host = weights.WeighedHost(host_state, 'someweight')
+ expected = {'weight': 'someweight',
+ 'host': 'somehost'}
+ self.assertThat(host.to_dict(), matchers.DictMatches(expected))
+
+ def test_all_weighers(self):
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertIn('RAMWeigher', class_names)
+ self.assertIn('MetricsWeigher', class_names)
+ self.assertIn('IoOpsWeigher', class_names)
+
+
+class RamWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(RamWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.ram.RAMWeigher'])
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
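+
+        # Assumed normalization (the usual weigher behaviour, not spelled out
+        # in this patch): free RAM is scaled into [0, 1] per request, i.e.
+        #   weight = multiplier * (free - min_free) / (max_free - min_free)
+        #   host4  = 1.0 * (8192 - 512) / (8192 - 512) = 1.0
+        #   host1  = 1.0 * (512 - 512) / (8192 - 512)  = 0.0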
+
+    def test_ram_weigher_multiplier1(self):
+ self.flags(ram_weight_multiplier=0.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+        # All hosts have the same weight, so no particular winner is expected.
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(0.0, weighed_host.weight)
+
+    def test_ram_weigher_multiplier2(self):
+ self.flags(ram_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * 2, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+    def test_ram_weigher_negative(self):
+ self.flags(ram_weight_multiplier=1.0)
+ hostinfo_list = self._get_all_hosts()
+ host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
+ host_state = fakes.FakeHostState('negative', 'negative', host_attr)
+ hostinfo_list = list(hostinfo_list) + [host_state]
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+ # negativehost: free_ram_mb=-512
+
+ # so, host4 should win
+        weighed_hosts = self.weight_handler.get_weighed_objects(
+            self.weight_classes, hostinfo_list, {})
+
+        weighed_host = weighed_hosts[0]
+        self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ # and negativehost should lose
+        weighed_host = weighed_hosts[-1]
+        self.assertEqual(0.0, weighed_host.weight)
+ self.assertEqual('negative', weighed_host.obj.host)
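+
+        # Under the same assumed normalization the negative host defines the
+        # minimum of the range, so it lands exactly at the bottom:
+        #   negative = (-512 - (-512)) / (8192 - (-512)) = 0.0
+        #   host4    = (8192 - (-512)) / (8192 - (-512)) = 1.0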
+
+
+class MetricsWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(MetricsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES_METRICS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.metrics.MetricsWeigher'])
+
+ def _get_weighed_host(self, hosts, setting, weight_properties=None):
+        if weight_properties is None:
+ weight_properties = {}
+ self.flags(weight_setting=setting, group='metrics')
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, settings, expected_weight, expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list, settings)
+ self.assertEqual(expected_weight, weighed_host.weight)
+ self.assertEqual(expected_host, weighed_host.obj.host)
+
+ def test_single_resource(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host4 should win:
+ setting = ['foo=1']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host2 should win:
+ setting = ['foo=0.0001', 'bar=1']
+ self._do_test(setting, 1.0, 'host2')
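+
+        # Worked example of the assumed scoring (sum of ratio * metric value,
+        # normalized to [0, 1] afterwards):
+        #   host1: 0.0001 * 512  + 1 * 1 = 1.0512
+        #   host2: 0.0001 * 1024 + 1 * 2 = 2.1024  <- highest, weight 1.0
+        #   host3: 0.0001 * 3072 + 1 * 1 = 1.3072
+        #   host4: 0.0001 * 8192 + 1 * 0 = 0.8192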
+
+    def test_single_resource_negative_ratio(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host1 should win:
+ setting = ['foo=-1']
+ self._do_test(setting, 1.0, 'host1')
+
+ def test_multiple_resource_missing_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource_wrong_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar = 2.0t']
+ self._do_test(setting, 1.0, 'host4')
+
+ def _check_parsing_result(self, weigher, setting, results):
+ self.flags(weight_setting=setting, group='metrics')
+ weigher._parse_setting()
+        self.assertEqual(len(results), len(weigher.setting))
+ for item in results:
+ self.assertIn(item, weigher.setting)
+
+ def test_parse_setting(self):
+ weigher = self.weight_classes[0]()
+ self._check_parsing_result(weigher,
+ ['foo=1'],
+ [('foo', 1.0)])
+ self._check_parsing_result(weigher,
+ ['foo=1', 'bar=-2.1'],
+ [('foo', 1.0), ('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo=a1', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['=5', 'bar=-2.1'],
+ [('bar', -2.1)])
+
+ def test_metric_not_found_required(self):
+ setting = ['foo=1', 'zot=2']
+ self.assertRaises(exception.ComputeHostMetricNotFound,
+ self._do_test,
+ setting,
+ 8192,
+ 'host4')
+
+ def test_metric_not_found_non_required(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # host5: foo=768, bar=0, zot=1
+ # host6: foo=2048, bar=0, zot=2
+ # so, host5 should win:
+ self.flags(required=False, group='metrics')
+ setting = ['foo=0.0001', 'zot=-1']
+ self._do_test(setting, 1.0, 'host5')
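+
+        # With required=False the weigher presumably falls back to a strongly
+        # negative placeholder for hosts lacking 'zot' (an assumption about
+        # the weight_of_unavailable option), leaving host5 and host6 in
+        # contention:
+        #   host5: 0.0001 * 768  - 1 * 1 = -0.9232  <- highest, weight 1.0
+        #   host6: 0.0001 * 2048 - 1 * 2 = -1.7952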
+
+
+COMPUTE_NODES_IO_OPS = [
+ # host1: num_io_ops=1
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=None, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '1'})),
+ # host2: num_io_ops=2
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '2'})),
+    # host3: num_io_ops=0, so host3 should win with the default
+    # io_ops_weight_multiplier configuration.
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '0'})),
+    # host4: num_io_ops=4, so host4 should win with a positive
+    # io_ops_weight_multiplier configuration.
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8888, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '4'})),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
+]
+
+
+class IoOpsWeigherTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IoOpsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=COMPUTE_NODES_IO_OPS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.io_ops.IoOpsWeigher'])
+
+ def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
+ if io_ops_weight_multiplier is not None:
+ self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, {})[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, io_ops_weight_multiplier, expected_weight,
+ expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list,
+ io_ops_weight_multiplier)
+        self.assertEqual(expected_weight, weighed_host.weight)
+        if expected_host:
+            self.assertEqual(expected_host, weighed_host.obj.host)
+
+ def test_io_ops_weight_multiplier_by_default(self):
+ self._do_test(io_ops_weight_multiplier=None,
+ expected_weight=0.0,
+ expected_host='host3')
+
+ def test_io_ops_weight_multiplier_zero_value(self):
+        # All hosts have the same weight, so no particular winner is expected.
+ self._do_test(io_ops_weight_multiplier=0.0,
+ expected_weight=0.0,
+ expected_host=None)
+
+ def test_io_ops_weight_multiplier_positive_value(self):
+ self._do_test(io_ops_weight_multiplier=2.0,
+ expected_weight=2.0,
+ expected_host='host4')
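+
+    # A hedged reading of the three cases above, assuming num_io_ops is
+    # normalized into [0, 1] before the multiplier is applied and that the
+    # default io_ops_weight_multiplier is negative, so lightly loaded hosts
+    # win:
+    #   default (-1.0): host3 (0 ops) -> -1.0 * 0.0 = 0.0, the top weight
+    #   0.0:            every host    ->  0.0, ordering is undefined
+    #   2.0:            host4 (4 ops) ->  2.0 * 1.0 = 2.0, busiest host wins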