author     Zuul <zuul@review.opendev.org>    2023-05-09 12:46:37 +0000
committer  Gerrit Code Review <review@openstack.org>    2023-05-09 12:46:37 +0000
commit     0c397d60e79e47da05fd9dcee173514b2b8dc2cc (patch)
tree       3f2477d775343b0780dc17e838b4fd9b6efe36c4
parent     07b7db090d6575e06d82b10757f91200c1ee6616 (diff)
parent     cffe3971ce585a1ddc374a3ed067347857338831 (diff)
download   nova-0c397d60e79e47da05fd9dcee173514b2b8dc2cc.tar.gz
Merge "Handle zero pinned CPU in a cell with mixed policy"
-rw-r--r--  nova/tests/functional/libvirt/test_numa_servers.py  | 42
-rw-r--r--  nova/virt/hardware.py                                | 10
2 files changed, 25 insertions(+), 27 deletions(-)
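
This merge fixes bug 1994526: with the mixed CPU policy, an instance can have a
NUMA cell that contains no dedicated vCPUs at all, so pinning for that cell
legitimately produces an empty mapping. The old "if not pinning" checks in
nova/virt/hardware.py treated that empty dict the same as None (a failed
pinning attempt) and rejected the host, so scheduling failed. A minimal
standalone sketch of the gotcha — pin_cpus and its arguments are hypothetical
illustration names, not Nova's actual code:

    # An empty dict is a *valid* result (zero CPUs to pin in this cell),
    # while None signals that pinning was impossible. Plain truthiness
    # conflates the two, because `not {}` is True in Python.
    def pin_cpus(requested, available):
        """Return a {vcpu: pcpu} mapping, or None when pinning fails."""
        if len(requested) > len(available):
            return None  # failure: not enough free host cores
        return dict(zip(sorted(requested), sorted(available)))

    pinning = pin_cpus(set(), {6, 7})  # mixed policy: no dedicated vCPUs here
    assert pinning == {}               # valid empty mapping, not a failure

    if not pinning:                    # old check: wrongly rejects {}
        print("rejected (buggy)")
    if pinning is None:                # fixed check: only None is a failure
        print("rejected")
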
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index f021869722..5b73e1b965 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -391,34 +391,30 @@ class NUMAServersTest(NUMAServersTestBase):
}
flavor_id = self._create_flavor(
vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+ expected_usage = {
+ 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+ }
# The only possible solution (ignoring the order of vCPU1,2):
# vCPU 0 => pCPU 0, NUMA0, shared
# vCPU 1 => pCPU 6, NUMA1, dedicated
# vCPU 2 => pCPU 7, NUMA1, dedicated
- # This is bug 1994526 as the scheduling fails
- self._run_build_test(flavor_id, end_status='ERROR')
+ server = self._run_build_test(
+ flavor_id, expected_usage=expected_usage)
- # # After bug 1994526 is fixed, this should pass
- # expected_usage = {
- # 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
- # }
- # server = self._run_build_test(
- # flavor_id, expected_usage=expected_usage)
- #
- # # sanity check the instance topology
- # inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
- # self.assertEqual(2, len(inst.numa_topology.cells))
- #
- # self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
- # self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
- # self.assertEqual(None, inst.numa_topology.cells[0].cpu_pinning)
- #
- # self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
- # self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
- # self.assertEqual(
- # {6, 7},
- # set(inst.numa_topology.cells[1].cpu_pinning.values())
- # )
+ # sanity check the instance topology
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ self.assertEqual(2, len(inst.numa_topology.cells))
+
+ self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+ self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+ self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+ self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+ self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+ self.assertEqual(
+ {6, 7},
+ set(inst.numa_topology.cells[1].cpu_pinning.values())
+ )
def test_create_server_with_dedicated_policy_old_configuration(self):
"""Create a server using the legacy extra spec and configuration.
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 292536735a..9693e405d3 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
continue
break
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
@@ -2608,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
None, fields.CPUAllocationPolicy.SHARED,
):
continue
-
- pinned_cpus = set(instance_cell.cpu_pinning.values())
+ if instance_cell.cpu_pinning:
+ pinned_cpus = set(instance_cell.cpu_pinning.values())
+ else:
+ pinned_cpus = set()
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
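
The last hunk applies the same idea to usage accounting: under the mixed
policy, a cell whose vCPUs are all shared has no pinning at all (the test
above asserts cpu_pinning is None for cell 0), so .values() can no longer be
called unconditionally. A standalone sketch of the guard — pinned_host_cpus
is a hypothetical helper name, simplified from numa_usage_from_instance_numa:

    # Collect pinned host CPUs only when a pinning mapping actually exists;
    # reserved CPUs are added on top, mirroring the merged change.
    def pinned_host_cpus(cpu_pinning, cpuset_reserved):
        pinned = set(cpu_pinning.values()) if cpu_pinning else set()
        if cpuset_reserved:
            pinned |= cpuset_reserved
        return pinned

    assert pinned_host_cpus(None, None) == set()
    assert pinned_host_cpus({1: 6, 2: 7}, {0}) == {0, 6, 7}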