1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import objects
from nova.objects import fields
from nova.scheduler import filters
from nova.virt import hardware
# Module-level logger, named after this module per oslo.log convention.
LOG = logging.getLogger(__name__)
class NUMATopologyFilter(filters.BaseHostFilter):
    """Filter hosts on their ability to satisfy a requested NUMA topology."""

    # NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
    # the API validates that a rebuild cannot change the NUMA constraints of
    # an instance; a rebuild request with different constraints is rejected
    # there, so running this filter again on rebuild would be redundant.
    RUN_ON_REBUILD = False

    def _satisfies_cpu_policy(self, host_state, extra_specs, image_props):
        """Check the host against any requested CPU (thread) policy.

        Returns False only when a 'dedicated' CPU policy combined with a
        'require' thread policy is requested but the host reports no
        hyperthreading; every other combination passes here and is resolved
        properly during scheduling.
        """
        # Without a host NUMA topology there is nothing to check against.
        if not host_state.numa_topology:
            return True

        # NOTE(stephenfin): The image-supplied and flavor-supplied policies
        # may conflict; resolving that conflict is deliberately not done
        # here — it happens during scheduling.
        requested_policies = (extra_specs.get('hw:cpu_policy'),
                              image_props.get('hw_cpu_policy'))
        if fields.CPUAllocationPolicy.DEDICATED not in requested_policies:
            return True

        requested_thread_policies = (
            extra_specs.get('hw:cpu_thread_policy'),
            image_props.get('hw_cpu_thread_policy'))
        if (fields.CPUThreadAllocationPolicy.REQUIRE
                not in requested_thread_policies):
            return True

        if host_state.numa_topology.has_threads:
            return True

        LOG.debug("%(host_state)s fails CPU policy requirements. "
                  "Host does not have hyperthreading or "
                  "hyperthreading is disabled, but 'require' threads "
                  "policy was requested.", {'host_state': host_state})
        return False

    def host_passes(self, host_state, spec_obj):
        """Return True when the host can fit the requested NUMA topology.

        Also prunes ``host_state.allocation_candidates`` down to the
        candidates whose provider mappings allow the instance topology to
        fit, and records the NUMA limits on ``host_state.limits``.
        """
        # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the
        # unfortunate side effect of modifying 'spec_obj.numa_topology' - an
        # InstanceNUMATopology object - by populating the 'cpu_pinning' field.
        # This is rather rude and said function should be reworked to avoid
        # doing this. That's a large, non-backportable cleanup however, so for
        # now we just duplicate spec_obj to prevent changes propagating to
        # future filter calls.
        spec_obj = spec_obj.obj_clone()

        extra_specs = spec_obj.flavor.extra_specs
        image_props = spec_obj.image.properties
        requested_topology = spec_obj.numa_topology
        host_topology = host_state.numa_topology
        # 'network_metadata' is an optional field on the request spec.
        network_metadata = (spec_obj.network_metadata
                            if 'network_metadata' in spec_obj else None)
        # Unwrap the InstancePCIRequests object down to its request list.
        pci_requests = spec_obj.pci_requests
        if pci_requests:
            pci_requests = pci_requests.requests

        if not self._satisfies_cpu_policy(host_state, extra_specs,
                                          image_props):
            return False

        # No NUMA topology requested: any host will do.
        if not requested_topology:
            return True

        # A NUMA topology was requested but the host does not expose one.
        if not host_topology:
            LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. "
                      "No host NUMA topology while the instance specified "
                      "one.",
                      {'host': host_state.host, 'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=host_state.cpu_allocation_ratio,
            ram_allocation_ratio=host_state.ram_allocation_ratio)
        if network_metadata:
            limits.network_metadata = network_metadata

        # Keep only the allocation candidates whose provider mappings let
        # the requested topology fit on this host.
        viable_candidates = []
        for candidate in host_state.allocation_candidates:
            LOG.debug(
                'NUMATopologyFilter tries allocation candidate: %s, %s',
                candidate, requested_topology
            )
            fitted_topology = hardware.numa_fit_instance_to_host(
                host_topology, requested_topology,
                limits=limits,
                pci_requests=pci_requests,
                pci_stats=host_state.pci_stats,
                provider_mapping=candidate['mappings'],
            )
            if fitted_topology:
                LOG.debug(
                    'NUMATopologyFilter accepted allocation candidate: %s',
                    candidate
                )
                viable_candidates.append(candidate)
            else:
                LOG.debug(
                    'NUMATopologyFilter rejected allocation candidate: %s',
                    candidate
                )

        host_state.allocation_candidates = viable_candidates
        if not viable_candidates:
            LOG.debug("%(host)s, %(node)s fails NUMA topology "
                      "requirements. The instance does not fit on this "
                      "host.", {'host': host_state.host,
                                'node': host_state.nodename},
                      instance_uuid=spec_obj.instance_uuid)
            return False

        host_state.limits['numa_topology'] = limits
        return True