35 files changed, 2844 insertions, 96 deletions
diff --git a/doc/ext/support_matrix.py b/doc/ext/support_matrix.py new file mode 100644 index 0000000000..6e423fdc02 --- /dev/null +++ b/doc/ext/support_matrix.py @@ -0,0 +1,424 @@ +# Copyright (C) 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This provides a sphinx extension able to render the source/support-matrix.ini +file into the developer documentation. + +It is used via a single directive in the .rst file + + .. support_matrix:: + +""" + +import re + +from six.moves import configparser + +from docutils import nodes +from docutils.parsers import rst + + +class SupportMatrix(object): + """Represents the entire support matrix for Nova virt drivers + """ + + def __init__(self): + # List of SupportMatrixFeature instances, describing + # all the features present in Nova virt drivers + self.features = [] + + # Dict of (name, SupportMatrixTarget) enumerating + # all the hypervisor drivers that have data recorded + # for them in self.features. The 'name' dict key is + # the value from the SupportMatrixTarget.key attribute + self.targets = {} + + +class SupportMatrixFeature(object): + + STATUS_MANDATORY = "mandatory" + STATUS_CHOICE = "choice" + STATUS_CONDITION = "condition" + STATUS_OPTIONAL = "optional" + + STATUS_ALL = [STATUS_MANDATORY, STATUS_CHOICE, + STATUS_CONDITION, STATUS_OPTIONAL] + + def __init__(self, key, title, status=STATUS_OPTIONAL, + group=None, notes=None): + # A unique key (eg 'foo.bar.wizz') to identify the feature + self.key = key + # A human friendly short title for the feature + self.title = title + # One of the status constants + self.status = status + # Detail string if status was choice/condition + self.group = group + # Arbitrarily long string describing the feature in detail + self.notes = notes + # Dict of (name, SupportMatrixImplementation) detailing + # the implementation for each hypervisor driver. The + # 'name' dict key is the value from SupportMatrixTarget.key + # for the hypervisor in question + self.implementations = {} + + +class SupportMatrixImplementation(object): + + STATUS_COMPLETE = "complete" + STATUS_PARTIAL = "partial" + STATUS_MISSING = "missing" + + STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING] + + def __init__(self, status=STATUS_MISSING, notes=None): + # One of the status constants detailing the implementation + # level + self.status = status + # Arbitrary string describing any caveats of the implementation. + # Mandatory if status is 'partial', optional otherwise. 
+ self.notes = notes + + +class SupportMatrixTarget(object): + + def __init__(self, key, title, driver, hypervisor=None, architecture=None): + """:param key: Unique identifier for the hypervisor driver + :param title: Human friendly name of the hypervisor + :param driver: Name of the Nova driver + :param hypervisor: (optional) Name of the hypervisor, if many + :param architecture: (optional) Name of the architecture, if many + """ + self.key = key + self.title = title + self.driver = driver + self.hypervisor = hypervisor + self.architecture = architecture + + +class SupportMatrixDirective(rst.Directive): + + option_spec = { + 'support-matrix': unicode, + } + + def run(self): + matrix = self._load_support_matrix() + return self._build_markup(matrix) + + def _load_support_matrix(self): + """Reads the support-matrix.ini file and populates an instance + of the SupportMatrix class with all the data. + + :returns: SupportMatrix instance + """ + + cfg = configparser.SafeConfigParser() + env = self.state.document.settings.env + fname = self.options.get("support-matrix", + "support-matrix.ini") + rel_fpath, fpath = env.relfn2path(fname) + with open(fpath) as fp: + cfg.readfp(fp) + + # This ensures that the docs are rebuilt whenever the + # .ini file changes + env.note_dependency(rel_fpath) + + matrix = SupportMatrix() + + # The 'targets' section is special - it lists all the + # hypervisors that this file records data for + for item in cfg.options("targets"): + if not item.startswith("driver-impl-"): + continue + + # The driver string will optionally contain + # a hypervisor and architecture qualifier + # so we expect between 1 and 3 components + # in the name + key = item[12:] + title = cfg.get("targets", item) + name = key.split("-") + if len(name) == 1: + target = SupportMatrixTarget(key, + title, + name[0]) + elif len(name) == 2: + target = SupportMatrixTarget(key, + title, + name[0], + name[1]) + elif len(name) == 3: + target = SupportMatrixTarget(key, + title, + name[0], + name[1], + name[2]) + else: + raise Exception("'%s' field is malformed in '[%s]' section" % + (item, "DEFAULT")) + + matrix.targets[key] = target + + # All sections except 'targets' describe some feature of + # the Nova hypervisor driver implementation + for section in cfg.sections(): + if section == "targets": + continue + if not cfg.has_option(section, "title"): + raise Exception( + "'title' field missing in '[%s]' section" % section) + + title = cfg.get(section, "title") + + status = SupportMatrixFeature.STATUS_OPTIONAL + if cfg.has_option(section, "status"): + # The value is a string "status(group)" where + # the 'group' part is optional + status = cfg.get(section, "status") + offset = status.find("(") + group = None + if offset != -1: + group = status[offset + 1:-1] + status = status[0:offset] + + if status not in SupportMatrixFeature.STATUS_ALL: + raise Exception( + "'status' field value '%s' in ['%s']" + "section must be %s" % + (status, section, + ",".join(SupportMatrixFeature.STATUS_ALL))) + + notes = None + if cfg.has_option(section, "notes"): + notes = cfg.get(section, "notes") + feature = SupportMatrixFeature(section, + title, + status, + group, + notes) + + # Now we've got the basic feature details, we must process + # the hypervisor driver implementation for each feature + for item in cfg.options(section): + if not item.startswith("driver-impl-"): + continue + + key = item[12:] + if key not in matrix.targets: + raise Exception( + "Driver impl '%s' in '[%s]' not declared" % + (item, section)) + + status = 
cfg.get(section, item) + if status not in SupportMatrixImplementation.STATUS_ALL: + raise Exception( + "'%s' value '%s' in '[%s]' section must be %s" % + (item, status, section, + ",".join(SupportMatrixImplementation.STATUS_ALL))) + + noteskey = "driver-notes-" + item[12:] + notes = None + if cfg.has_option(section, noteskey): + notes = cfg.get(section, noteskey) + + target = matrix.targets[key] + impl = SupportMatrixImplementation(status, + notes) + feature.implementations[target.key] = impl + + for key in matrix.targets: + if key not in feature.implementations: + raise Exception("'%s' missing in '[%s]' section" % + (target.key, section)) + + matrix.features.append(feature) + + return matrix + + def _build_markup(self, matrix): + """Constructs the docutils content for the support matrix + """ + content = [] + self._build_summary(matrix, content) + self._build_details(matrix, content) + return content + + def _build_summary(self, matrix, content): + """Constructs the docutils content for the summary of + the support matrix. + + The summary consists of a giant table, with one row + for each feature, and a column for each hypervisor + driver. It provides an 'at a glance' summary of the + status of each driver + """ + + summarytitle = nodes.subtitle(text="Summary") + summary = nodes.table() + cols = len(matrix.targets.keys()) + cols += 2 + summarygroup = nodes.tgroup(cols=cols) + summarybody = nodes.tbody() + summaryhead = nodes.thead() + + for i in range(cols): + summarygroup.append(nodes.colspec(colwidth=1)) + summarygroup.append(summaryhead) + summarygroup.append(summarybody) + summary.append(summarygroup) + content.append(summarytitle) + content.append(summary) + + # This sets up all the column headers - two fixed + # columns for feature name & status + header = nodes.row() + blank = nodes.entry() + blank.append(nodes.emphasis(text="Feature")) + header.append(blank) + blank = nodes.entry() + blank.append(nodes.emphasis(text="Status")) + header.append(blank) + summaryhead.append(header) + + # then one column for each hypervisor driver + impls = matrix.targets.keys() + impls.sort() + for key in impls: + target = matrix.targets[key] + implcol = nodes.entry() + header.append(implcol) + implcol.append(nodes.strong(text=target.title)) + + # We now produce the body of the table, one row for + # each feature to report on + for feature in matrix.features: + item = nodes.row() + + # the hyperlink target name linking to details + id = re.sub("[^a-zA-Z0-9_]", "_", + feature.key) + + # first the to fixed columns for title/status + keycol = nodes.entry() + item.append(keycol) + keyref = nodes.reference(refid=id) + keytxt = nodes.inline() + keycol.append(keytxt) + keytxt.append(keyref) + keyref.append(nodes.strong(text=feature.title)) + + statuscol = nodes.entry() + item.append(statuscol) + statuscol.append(nodes.inline( + text=feature.status, + classes=["sp_feature_" + feature.status])) + + # and then one column for each hypervisor driver + impls = matrix.targets.keys() + impls.sort() + for key in impls: + target = matrix.targets[key] + impl = feature.implementations[key] + implcol = nodes.entry() + item.append(implcol) + + id = re.sub("[^a-zA-Z0-9_]", "_", + feature.key + "_" + key) + + implref = nodes.reference(refid=id) + impltxt = nodes.inline() + implcol.append(impltxt) + impltxt.append(implref) + + status = "" + if impl.status == "complete": + status = u"\u2714" + elif impl.status == "missing": + status = u"\u2716" + elif impl.status == "partial": + status = u"\u2714" + + 
implref.append(nodes.literal( + text=status, + classes=["sp_impl_summary", "sp_impl_" + impl.status])) + + summarybody.append(item) + + def _build_details(self, matrix, content): + """Constructs the docutils content for the details of + the support matrix. + + This is generated as a bullet list of features. + Against each feature we provide the description of + the feature and then the details of the hypervisor + impls, with any driver specific notes that exist + """ + + detailstitle = nodes.subtitle(text="Details") + details = nodes.bullet_list() + + content.append(detailstitle) + content.append(details) + + # One list entry for each feature we're reporting on + for feature in matrix.features: + item = nodes.list_item() + + status = feature.status + if feature.group is not None: + status += "(" + feature.group + ")" + + # The hypervisor target name linked from summary table + id = re.sub("[^a-zA-Z0-9_]", "_", + feature.key) + + # Highlight the feature title name + item.append(nodes.strong(text=feature.title, + ids=[id])) + + para = nodes.paragraph() + para.append(nodes.strong(text="Status: " + status + ". ")) + if feature.notes is not None: + para.append(nodes.inline(text=feature.notes)) + item.append(para) + + # A sub-list giving details of each hypervisor target + impls = nodes.bullet_list() + for key in feature.implementations: + target = matrix.targets[key] + impl = feature.implementations[key] + subitem = nodes.list_item() + + id = re.sub("[^a-zA-Z0-9_]", "_", + feature.key + "_" + key) + subitem += [ + nodes.strong(text=target.title + ": "), + nodes.literal(text=impl.status, + classes=["sp_impl_" + impl.status], + ids=[id]), + ] + if impl.notes is not None: + subitem.append(nodes.paragraph(text=impl.notes)) + impls.append(subitem) + + item.append(impls) + details.append(item) + + +def setup(app): + app.add_directive('support_matrix', SupportMatrixDirective) + app.add_stylesheet('support-matrix.css') diff --git a/doc/source/_static/support-matrix.css b/doc/source/_static/support-matrix.css new file mode 100644 index 0000000000..1758ea4122 --- /dev/null +++ b/doc/source/_static/support-matrix.css @@ -0,0 +1,37 @@ + +.sp_feature_mandatory { + font-weight: bold; +} + +.sp_feature_optional { +} + +.sp_feature_choice { + font-style: italic; + font-weight: bold; +} + +.sp_feature_condition { + font-style: italic; + font-weight: bold; +} + + +.sp_impl_complete { + color: rgb(0, 120, 0); + font-weight: normal; +} + +.sp_impl_missing { + color: rgb(120, 0, 0); + font-weight: normal; +} + +.sp_impl_partial { + color: rgb(170, 170, 0); + font-weight: normal; +} + +.sp_impl_summary { + font-size: 2em; +}
\ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index 3083cd1636..257db74929 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -34,6 +34,7 @@ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', + "ext.support_matrix", ] todo_include_todos = True @@ -195,7 +196,7 @@ man_pages = [ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. diff --git a/doc/source/index.rst b/doc/source/index.rst index fdf978bb6f..af1c1567bb 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -48,6 +48,7 @@ Developer Docs devref/index man/index + support-matrix API Extensions ============== diff --git a/doc/source/support-matrix.ini b/doc/source/support-matrix.ini new file mode 100644 index 0000000000..c2b54caad0 --- /dev/null +++ b/doc/source/support-matrix.ini @@ -0,0 +1,777 @@ +# Copyright (C) 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# +# ========================================= +# Nova Hypervisor Feature Capability Matrix +# ========================================= +# +# This obsoletes the information previously at +# +# https://wiki.openstack.org/wiki/HypervisorSupportMatrix +# +# This file contains a specification of what feature capabilities each +# hypervisor driver in Nova is able to support. Feature capabilities include +# what API operations are supported, what storage / networking features can be +# used and what aspects of the guest machine can be configured. The capabilities +# can be considered to be structured into nested groups, but in this file they +# have been flattened for ease of representation. The section names represent +# the group structure. At the top level there are the following groups defined +# +# - operation - public API operations +# - storage - host storage configuration options +# - networking - host networking configuration options +# - guest - guest hardware configuration options +# +# When considering which capabilities should be marked as mandatory, +# consider the general guiding principles listed in the support-matrix.rst +# file +# +# The 'status' field takes possible values +# +# - mandatory - unconditionally required to be implemented +# - optional - optional to support, nice to have +# - choice(group) - at least one of the options within the named group +# must be implemented +# - conditional(cond) - required, if the referenced condition is met. 
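+#
+# For example, a section using this grammar might look like the
+# following (a purely hypothetical feature, shown for illustration
+# only; the driver-impl values are explained below):
+#
+#   [operation.example]
+#   title=Example feature
+#   status=choice(example)
+#   notes=Longer free text description of the example feature
+#   driver-impl-libvirt-kvm-x86=partial
+#   driver-notes-libvirt-kvm-x86=Explanation of the caveats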
+#
+# The value against each 'driver-impl-XXXX' entry refers to the level
+# of the implementation of the feature in that driver
+#
+# - complete - fully implemented, expected to work at all times
+# - partial - implemented, but with caveats about when it will work,
+#             e.g. some configurations or hardware or guest OS may not
+#             support it
+# - missing - not implemented at all
+#
+# In the case of a driver being marked as 'partial', the
+# 'driver-notes-XXXX' entry should be used to explain the caveats
+# around the implementation.
+#
+
+[targets]
+# List of driver impls we are going to record info for later.
+# This list only covers drivers that are in the Nova source
+# tree. Out of tree drivers should maintain their own equivalent
+# document, and merge it with this when their code merges into
+# Nova core.
+driver-impl-xenserver=XenServer
+driver-impl-libvirt-kvm-x86=Libvirt KVM (x86)
+driver-impl-libvirt-qemu-x86=Libvirt QEMU (x86)
+driver-impl-libvirt-lxc=Libvirt LXC
+driver-impl-libvirt-xen=Libvirt Xen
+driver-impl-vmware=VMWare VCenter
+driver-impl-hyperv=Hyper-V
+driver-impl-ironic=Ironic
+
+[operation.attach-volume]
+title=Attach block volume to instance
+status=optional
+notes=The attach volume operation provides a means to hotplug
+ additional block storage to a running instance. This allows
+ storage capabilities to be expanded without interruption of
+ service. In a cloud model it would be more typical to just
+ spin up a new instance with large storage, so the ability to
+ hotplug extra storage is for those cases where the instance
+ is considered to be more of a pet than cattle. Therefore
+ this operation is not considered mandatory to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.detach-volume]
+title=Detach block volume from instance
+status=optional
+notes=See notes for the attach volume operation.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.evacuate]
+title=Evacuate instances from host
+status=optional
+notes=This allows a host to be placed into maintenance
+ mode, automatically triggering migration of any running
+ instances to an alternative host and preventing new
+ instances from being launched. This is not considered
+ to be a mandatory operation to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=missing
+driver-impl-libvirt-qemu-x86=missing
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.get-guest-info]
+title=Guest instance status
+status=mandatory
+notes=Provides a quick report on information about the guest instance,
+ including the power state, memory allocation, CPU allocation, number
+ of vCPUs and cumulative CPU execution time. As well as being
+ informational, the power state is used by the compute manager for
+ tracking changes in guests. 
Therefore this operation is considered
+ mandatory to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[operation.get-host-info]
+title=Guest host status
+status=optional
+notes=Unclear what this refers to.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.live-migrate]
+title=Live migrate instance across hosts
+status=optional
+notes=Live migration provides a way to move an instance from one
+ compute host to another. Administrators may use
+ this to evacuate instances from a host that needs to undergo
+ maintenance tasks, though of course this may not help if the
+ host is already suffering a failure. In general instances are
+ considered cattle rather than pets, so it is expected that an
+ instance is liable to be killed if host maintenance is required.
+ It is technically challenging for some hypervisors to provide
+ support for the live migration operation, particularly those
+ built on container based virtualization. Therefore this
+ operation is not considered mandatory to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-notes-vmware=https://bugs.launchpad.net/nova/+bug/1192192
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.launch]
+title=Launch instance
+status=mandatory
+notes=Importing pre-existing running virtual machines on a host is
+ considered out of scope of the cloud paradigm. Therefore this
+ operation is mandatory to support in drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[operation.pause]
+title=Stop instance CPUs
+status=optional
+notes=Stopping an instance's CPUs can be thought of as roughly
+ equivalent to suspend-to-RAM. The instance is still present
+ in memory, but execution has stopped. The problem, however,
+ is that there is no mechanism to inform the guest OS that
+ this takes place, so upon unpausing its clocks will no
+ longer report the correct time. For this reason hypervisor vendors
+ generally discourage use of this feature and some do not even
+ implement it. Therefore this operation is considered optional
+ to support in drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=complete
+driver-impl-ironic=partial
+driver-notes-ironic=Only certain ironic drivers support this
+
+[operation.reboot]
+title=Reboot instance
+status=optional
+notes=It is reasonable for a guest OS administrator to trigger a
+ graceful reboot from inside the instance. A host-initiated
+ graceful reboot requires guest co-operation, and a non-graceful
+ reboot can be achieved by a combination of stop+start. 
Therefore
+ this operation is considered optional.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[operation.rescue]
+title=Rescue instance
+status=optional
+notes=The rescue operation starts an instance in a special
+ configuration whereby it is booted from a special root
+ disk image. The goal is to allow an administrator to
+ recover the state of a broken virtual machine. In general
+ the cloud model considers instances to be cattle, so if
+ an instance breaks the general expectation is that it be
+ thrown away and a new instance created. Therefore this
+ operation is considered optional to support in drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[operation.resize]
+title=Resize instance
+status=optional
+notes=The resize operation allows the user to change a running
+ instance to match the size of a different flavour to the one
+ it was initially launched with. There are many different
+ flavour attributes that potentially need to be updated. In
+ general it is a very technically challenging problem for a
+ hypervisor to support the alteration of all relevant config
+ settings for a running instance. Therefore this operation
+ is considered optional to support in drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=partial
+driver-notes-ironic=Only certain ironic drivers support this
+
+[operation.resume]
+title=Restore instance
+status=optional
+notes=See notes for the suspend operation.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=partial
+driver-notes-ironic=Only certain ironic drivers support this
+
+[operation.service-control.wtf.com]
+title=Service control
+status=optional
+notes=Something something, dark side, something something.
+ Hard to claim this is mandatory when no one seems to know
+ what "Service control" refers to in the context of virt
+ drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=complete
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[operation.set-admin-password]
+title=Set instance admin password
+status=optional
+notes=Provides a mechanism to (re)set the password of the administrator
+ account inside the instance operating system. This requires that the
+ hypervisor have a way to communicate with the running guest operating
+ system. Given the wide range of operating systems in existence it is
+ unreasonable to expect this to be practical in the general case. The
+ configdrive and metadata service both provide a mechanism for setting
+ the administrator password at initial boot time. 
If this
+ operation were not available, the administrator would simply have to
+ log in to the guest and change the password in the normal manner, so
+ this is just a convenient optimization. Therefore this operation is
+ not considered mandatory for drivers to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=missing
+driver-impl-libvirt-qemu-x86=missing
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[operation.snapshot]
+title=Save snapshot of instance disk
+status=optional
+notes=The snapshot operation allows the current state of the
+ instance root disk to be saved and uploaded back into the
+ glance image repository. The instance can later be booted
+ again using this saved image. This is in effect making
+ the ephemeral instance root disk into a semi-persistent
+ storage, in so much as it is preserved even though the guest
+ is no longer running. In general though, the expectation is
+ that the root disks are ephemeral, so the ability to take a
+ snapshot cannot be assumed. Therefore this operation is not
+ considered mandatory to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[operation.suspend]
+title=Suspend instance
+status=optional
+notes=Suspending an instance can be thought of as roughly
+ equivalent to suspend-to-disk. The instance no longer
+ consumes any RAM or CPUs, with its live running state
+ having been preserved in a file on disk. It can later
+ be restored, at which point it should continue execution
+ where it left off. As with pause, it suffers from the fact
+ that the guest OS will typically be left with a clock that
+ is no longer telling the correct time. For container based
+ virtualization solutions, this operation is particularly
+ technically challenging to implement and is an area of
+ active research. This operation tends to make more sense
+ when thinking of instances as pets, rather than cattle,
+ since with cattle it would be simpler to just terminate
+ the instance instead of suspending. Therefore this operation
+ is considered optional to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=partial
+driver-notes-ironic=Only certain ironic drivers support this
+
+[operation.swap-volume]
+title=Swap block volumes
+status=optional
+notes=The swap volume operation is a mechanism for changing a running
+ instance so that its attached volume(s) are backed by different
+ storage in the host. An alternative to this would be to simply
+ terminate the existing instance and spawn a new instance with the
+ new storage. In other words, this operation is primarily targeted
+ towards the pet use case rather than cattle. 
Therefore this is considered
+ optional to support.
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[operation.terminate]
+title=Shutdown instance
+status=mandatory
+notes=The ability to terminate a virtual machine is required in
+ order for a cloud user to stop utilizing resources and thus
+ avoid indefinitely ongoing billing. Therefore this operation
+ is mandatory to support in drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[operation.unpause]
+title=Resume instance CPUs
+status=optional
+notes=See notes for the pause operation.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=complete
+driver-impl-ironic=partial
+driver-notes-ironic=Only certain ironic drivers support this
+
+[guest.disk.autoconfigure.wtf.com]
+title=Auto configure disk
+status=optional
+notes=Something something, dark side, something something.
+ Unclear just what this is about.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=missing
+driver-impl-libvirt-qemu-x86=missing
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[guest.disk.rate-limit]
+title=Instance disk I/O limits
+status=optional
+notes=The ability to set rate limits on virtual disks allows for
+ greater performance isolation between instances running on the
+ same host storage. It is valid to delegate scheduling of I/O
+ operations to the hypervisor with its default settings, instead
+ of doing fine grained tuning. Therefore this is not considered
+ to be a mandatory configuration to support.
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[guest.setup.configdrive]
+title=Config drive support
+status=choice(guest.setup)
+notes=The config drive provides an information channel into
+ the guest operating system, to enable configuration of the
+ administrator password, file injection, registration of
+ SSH keys, etc. Since cloud images typically ship with all
+ login methods locked, a mechanism to set the administrator
+ password or keys is required to get login access. Alternatives
+ include the metadata service and disk injection. At least one
+ of the guest setup mechanisms is required to be supported by
+ drivers, in order to enable login access. 
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[guest.setup.inject.file]
+title=Inject files into disk image
+status=optional
+notes=This allows the end user to provide data for multiple
+ files to be injected into the root filesystem before an instance
+ is booted. This requires that the compute node understand the
+ format of the filesystem and any partitioning scheme it might
+ use on the block device. This is a non-trivial problem considering
+ the vast number of filesystems in existence. The problem of injecting
+ files into a guest OS is better solved by obtaining the files via the
+ metadata service or config drive. Therefore this operation is
+ considered optional to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[guest.setup.inject.networking]
+title=Inject guest networking config
+status=optional
+notes=This allows static networking configuration (IP
+ address, netmask, gateway and routes) to be injected directly
+ into the root filesystem before an instance is booted. This
+ requires that the compute node understand how networking is
+ configured in the guest OS, which is a non-trivial problem
+ considering the vast number of operating system types. The
+ problem of configuring networking is better solved by DHCP
+ or by obtaining static config via the metadata service or
+ config drive. Therefore this operation is considered optional
+ to support.
+driver-impl-xenserver=partial
+driver-notes-xenserver=Only for Debian derived guests
+driver-impl-libvirt-kvm-x86=partial
+driver-notes-libvirt-kvm-x86=Only for Debian derived guests
+driver-impl-libvirt-qemu-x86=partial
+driver-notes-libvirt-qemu-x86=Only for Debian derived guests
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=partial
+driver-notes-vmware=Requires vmware tools installed
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[console.rdp]
+title=Remote desktop over RDP
+status=choice(console)
+notes=This allows the administrator to interact with the graphical
+ console of the guest OS via RDP. This provides a way to see boot
+ up messages and log in to the instance when networking configuration
+ has failed, which prevents a network based login. Some operating
+ systems may prefer to emit messages via the serial console for
+ easier consumption. Therefore support for this operation is not
+ mandatory; however, a driver is required to support at least one
+ of the listed console access operations.
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=missing
+driver-impl-libvirt-qemu-x86=missing
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[console.serial.log]
+title=View serial console logs
+status=choice(console)
+notes=This allows the administrator to query the logs of data
+ emitted by the guest OS on its virtualized serial port. For
+ UNIX guests this typically includes all boot up messages and
+ so is useful for diagnosing problems when an instance fails
+ to successfully boot. 
Not all guest operating systems will be
+ able to emit boot information on a serial console, while others may
+ only support graphical consoles. Therefore support for this
+ operation is not mandatory; however, a driver is required to
+ support at least one of the listed console access operations.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[console.spice]
+title=Remote desktop over SPICE
+status=choice(console)
+notes=This allows the administrator to interact with the graphical
+ console of the guest OS via SPICE. This provides a way to see boot
+ up messages and log in to the instance when networking configuration
+ has failed, which prevents a network based login. Some operating
+ systems may prefer to emit messages via the serial console for
+ easier consumption. Therefore support for this operation is not
+ mandatory; however, a driver is required to support at least one
+ of the listed console access operations.
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[console.vnc]
+title=Remote desktop over VNC
+status=choice(console)
+notes=This allows the administrator to interact with the graphical
+ console of the guest OS via VNC. This provides a way to see boot
+ up messages and log in to the instance when networking configuration
+ has failed, which prevents a network based login. Some operating
+ systems may prefer to emit messages via the serial console for
+ easier consumption. Therefore support for this operation is not
+ mandatory; however, a driver is required to support at least one
+ of the listed console access operations.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[storage.block]
+title=Block storage support
+status=optional
+notes=Block storage provides instances with direct attached
+ virtual disks that can be used for persistent storage of data.
+ As an alternative to direct attached disks, an instance may
+ choose to use network based persistent storage. OpenStack provides
+ object storage via the Swift service, or a traditional filesystem
+ such as NFS/GlusterFS may be used. Some types of instance may
+ not require persistent storage at all, being simple transaction
+ processing systems reading requests & sending results from/to
+ the network. Therefore support for this configuration is not
+ considered mandatory for drivers to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[storage.block.backend.fibrechannel]
+title=Block storage over fibre channel
+status=optional
+notes=To maximise performance of the block storage, it may be desirable
+ to directly access fibre channel LUNs from the underlying storage
+ technology on the compute hosts. 
Since this is just a performance
+ optimization of the I/O path, it is not considered mandatory to support.
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[storage.block.backend.iscsi]
+title=Block storage over iSCSI
+status=condition(storage.block==complete)
+notes=If the driver wishes to support block storage, it is common to
+ provide an iSCSI based backend to access the storage from Cinder.
+ This isolates the compute layer from knowledge of the specific storage
+ technology used by Cinder, albeit at a potential performance cost due
+ to the longer I/O path involved. If the driver chooses to support
+ block storage, then this is considered mandatory to support, otherwise
+ it is considered optional.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[storage.block.backend.iscsi.auth.chap]
+title=CHAP authentication for iSCSI
+status=optional
+notes=If accessing the Cinder iSCSI service over an untrusted LAN it
+ is desirable to be able to enable authentication for the iSCSI
+ protocol. CHAP is the commonly used authentication protocol for
+ iSCSI. This is not considered mandatory to support (?)
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=missing
+
+[storage.image]
+title=Image storage support
+status=mandatory
+notes=This refers to the ability to boot an instance from an image
+ stored in the glance image repository. Without this feature it
+ would not be possible to bootstrap from a clean environment, since
+ there would be no way to get block volumes populated, and reliance
+ on external PXE servers is out of scope. Therefore this is considered
+ a mandatory storage feature to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[networking.firewallrules]
+title=Network firewall rules
+status=optional
+notes=Unclear how this is different from security groups.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[networking.routing]
+title=Network routing
+status=optional
+notes=Unclear what this refers to.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=missing
+driver-impl-ironic=complete
+
+[networking.securitygroups]
+title=Network security groups
+status=optional
+notes=The security groups feature provides a way to define rules
+ to isolate the network traffic of different instances running
+ on a compute host. 
This would prevent actions such as MAC and
+ IP address spoofing, or the ability to set up rogue DHCP servers.
+ In a private cloud environment this may be considered to be a
+ superfluous requirement. Therefore this is considered to be
+ an optional configuration to support.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+
+[networking.topology.flat]
+title=Flat networking
+status=choice(networking.topology)
+notes=Provide network connectivity to guests using a
+ flat topology across all compute nodes. At least one
+ of the networking configurations is mandatory to
+ support in the drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=complete
+driver-impl-ironic=complete
+
+[networking.topology.vlan]
+title=VLAN networking
+status=choice(networking.topology)
+notes=Provide network connectivity to guests using VLANs
+ to define the topology. At least one of the networking
+ configurations is mandatory to support in the drivers.
+driver-impl-xenserver=complete
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=complete
+driver-impl-libvirt-xen=complete
+driver-impl-vmware=complete
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
diff --git a/doc/source/support-matrix.rst b/doc/source/support-matrix.rst
new file mode 100644
index 0000000000..98fdcd0f3e
--- /dev/null
+++ b/doc/source/support-matrix.rst
@@ -0,0 +1,37 @@
+
+Hypervisor Support Matrix
+=========================
+
+When considering which capabilities should be marked as mandatory, the
+following general guiding principles were applied:
+
+* **Inclusivity** - people have shown the ability to make effective
+  use of a wide range of virtualization technologies with broadly
+  varying feature sets. Aiming to keep the requirements as inclusive
+  as possible avoids second-guessing what a user may wish to use
+  the cloud compute service for.
+
+* **Bootstrapping** - a practical use case test is to consider that
+  the starting point for the compute deployment is an empty data center
+  with new machines and network connectivity. Then look at what
+  minimum features are required of a compute service in order
+  to get user instances running and processing work over the
+  network.
+
+* **Competition** - an early leader in the cloud compute service space
+  was Amazon EC2. A sanity check for whether a feature should be
+  mandatory is to consider whether it was available in the first
+  public release of EC2. This had quite a narrow feature set, but
+  nonetheless found very high usage in many use cases. So it
+  serves to illustrate that many features need not be considered
+  mandatory in order to get useful work done.
+
+* **Reality** - there are many virt drivers currently shipped with
+  Nova, each with their own supported feature set. Any feature which is
+  missing in at least one in-tree virt driver must by inference be
+  considered optional until all in-tree drivers support it. This does
+  not rule out the possibility of a currently optional feature becoming
+  mandatory at a later date, based on the other principles above.
+
+.. 
support_matrix:: diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters index 1e0c029af2..4d35d4fb03 100644 --- a/etc/nova/rootwrap.d/compute.filters +++ b/etc/nova/rootwrap.d/compute.filters @@ -227,3 +227,5 @@ cp: CommandFilter, cp, root # nova/virt/xenapi/vm_utils.py: sync: CommandFilter, sync, root +# nova/virt/libvirt/imagebackend.py: +ploop: CommandFilter, ploop, root diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 1469d91ecf..1cb03779d7 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -206,12 +206,27 @@ class Claim(NopClaim): if host_topology: host_topology = objects.NUMATopology.obj_from_db_obj( host_topology) + pci_requests = objects.InstancePCIRequests.get_by_instance_uuid( + self.context, self.instance['uuid']) + + pci_stats = None + if pci_requests.requests: + pci_stats = self.tracker.pci_tracker.stats + instance_topology = ( hardware.numa_fit_instance_to_host( host_topology, requested_topology, - limits_topology=limit)) + limits_topology=limit, + pci_requests=pci_requests.requests, + pci_stats=pci_stats)) + if requested_topology and not instance_topology: - return (_("Requested instance NUMA topology cannot fit " + if pci_requests.requests: + return (_("Requested instance NUMA topology together with" + " requested PCI devices cannot fit the given" + " host NUMA topology")) + else: + return (_("Requested instance NUMA topology cannot fit " "the given host NUMA topology")) elif instance_topology: self.claimed_numa_topology = instance_topology diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5ca46c4bab..08f53d1def 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2527,10 +2527,11 @@ class ComputeManager(manager.Manager): # is already in ERROR. self._cleanup_volumes(context, instance.uuid, bdms, raise_exc=False) - # if a delete task succeed, always update vm state and task + # if a delete task succeeded, always update vm state and task # state without expecting task state to be DELETING instance.vm_state = vm_states.DELETED instance.task_state = None + instance.power_state = power_state.NOSTATE instance.terminated_at = timeutils.utcnow() instance.save() self._update_resource_tracker(context, instance) diff --git a/nova/context.py b/nova/context.py index e78636cdde..18fc2e195e 100644 --- a/nova/context.py +++ b/nova/context.py @@ -109,7 +109,7 @@ class RequestContext(object): if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog - if s.get('type') in ('volume', 'volumev2')] + if s.get('type') in ('volume', 'volumev2', 'key-manager')] else: # if list is empty or none self.service_catalog = [] diff --git a/nova/keymgr/barbican.py b/nova/keymgr/barbican.py new file mode 100644 index 0000000000..d9de15950f --- /dev/null +++ b/nova/keymgr/barbican.py @@ -0,0 +1,346 @@ +# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Key manager implementation for Barbican +""" + +import array +import base64 +import binascii + +from barbicanclient import client as barbican_client +from keystoneclient import session +from oslo.config import cfg +from oslo.utils import excutils + +from nova import exception +from nova.i18n import _ +from nova.i18n import _LE +from nova.keymgr import key as keymgr_key +from nova.keymgr import key_mgr +from nova.openstack.common import log as logging + +barbican_opts = [ + cfg.StrOpt('catalog_info', + default='key-manager:barbican:public', + help='Info to match when looking for barbican in the service ' + 'catalog. Format is: separated values of the form: ' + '<service_type>:<service_name>:<endpoint_type>'), + cfg.StrOpt('endpoint_template', + help='Override service catalog lookup with template for ' + 'barbican endpoint e.g. ' + 'http://localhost:9311/v1/%(project_id)s'), + cfg.StrOpt('os_region_name', + help='Region name of this node'), +] + +CONF = cfg.CONF +BARBICAN_OPT_GROUP = 'barbican' + +CONF.register_opts(barbican_opts, group=BARBICAN_OPT_GROUP) + +session.Session.register_conf_options(CONF, BARBICAN_OPT_GROUP) + +LOG = logging.getLogger(__name__) + + +class BarbicanKeyManager(key_mgr.KeyManager): + """Key Manager Interface that wraps the Barbican client API.""" + + def __init__(self): + self._barbican_client = None + self._base_url = None + + def _get_barbican_client(self, ctxt): + """Creates a client to connect to the Barbican service. + + :param ctxt: the user context for authentication + :return: a Barbican Client object + :raises Forbidden: if the ctxt is None + """ + + if not self._barbican_client: + # Confirm context is provided, if not raise forbidden + if not ctxt: + msg = _("User is not authorized to use key manager.") + LOG.error(msg) + raise exception.Forbidden(msg) + + try: + _SESSION = session.Session.load_from_conf_options( + CONF, + BARBICAN_OPT_GROUP) + + auth = ctxt.get_auth_plugin() + service_type, service_name, interface = (CONF. + barbican. + catalog_info. + split(':')) + region_name = CONF.barbican.os_region_name + service_parameters = {'service_type': service_type, + 'service_name': service_name, + 'interface': interface, + 'region_name': region_name} + + if CONF.barbican.endpoint_template: + self._base_url = (CONF.barbican.endpoint_template % + ctxt.to_dict()) + else: + self._base_url = _SESSION.get_endpoint( + auth, **service_parameters) + + # the barbican endpoint can't have the '/v1' on the end + self._barbican_endpoint = self._base_url.rpartition('/')[0] + + sess = session.Session(auth=auth) + self._barbican_client = barbican_client.Client( + session=sess, + endpoint=self._barbican_endpoint) + + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error creating Barbican client: %s"), e) + + return self._barbican_client + + def create_key(self, ctxt, expiration=None, name='Nova Compute Key', + payload_content_type='application/octet-stream', mode='CBC', + algorithm='AES', length=256): + """Creates a key. + + :param ctxt: contains information of the user and the environment + for the request (nova/context.py) + :param expiration: the date the key will expire + :param name: a friendly name for the secret + :param payload_content_type: the format/type of the secret data + :param mode: the algorithm mode (e.g. 
CBC or CTR mode) + :param algorithm: the algorithm associated with the secret + :param length: the bit length of the secret + + :return: the UUID of the new key + :raises Exception: if key creation fails + """ + barbican_client = self._get_barbican_client(ctxt) + + try: + key_order = barbican_client.orders.create_key( + name, + algorithm, + length, + mode, + payload_content_type, + expiration) + order_ref = key_order.submit() + order = barbican_client.orders.get(order_ref) + return self._retrieve_secret_uuid(order.secret_ref) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error creating key: %s"), e) + + def store_key(self, ctxt, key, expiration=None, name='Nova Compute Key', + payload_content_type='application/octet-stream', + payload_content_encoding='base64', algorithm='AES', + bit_length=256, mode='CBC', from_copy=False): + """Stores (i.e., registers) a key with the key manager. + + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param key: the unencrypted secret data. Known as "payload" to the + barbicanclient api + :param expiration: the expiration time of the secret in ISO 8601 + format + :param name: a friendly name for the key + :param payload_content_type: the format/type of the secret data + :param payload_content_encoding: the encoding of the secret data + :param algorithm: the algorithm associated with this secret key + :param bit_length: the bit length of this secret key + :param mode: the algorithm mode used with this secret key + :param from_copy: establishes whether the function is being used + to copy a key. In case of the latter, it does not + try to decode the key + + :returns: the UUID of the stored key + :raises Exception: if key storage fails + """ + barbican_client = self._get_barbican_client(ctxt) + + try: + if key.get_algorithm(): + algorithm = key.get_algorithm() + if payload_content_type == 'text/plain': + payload_content_encoding = None + encoded_key = key.get_encoded() + elif (payload_content_type == 'application/octet-stream' and + not from_copy): + key_list = key.get_encoded() + string_key = ''.join(map(lambda byte: "%02x" % byte, key_list)) + encoded_key = base64.b64encode(binascii.unhexlify(string_key)) + else: + encoded_key = key.get_encoded() + secret = barbican_client.secrets.create(name, + encoded_key, + payload_content_type, + payload_content_encoding, + algorithm, + bit_length, + mode, + expiration) + secret_ref = secret.store() + return self._retrieve_secret_uuid(secret_ref) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error storing key: %s"), e) + + def copy_key(self, ctxt, key_id): + """Copies (i.e., clones) a key stored by barbican. 
+ + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param key_id: the UUID of the key to copy + :return: the UUID of the key copy + :raises Exception: if key copying fails + """ + + try: + secret = self._get_secret(ctxt, key_id) + con_type = secret.content_types['default'] + secret_data = self._get_secret_data(secret, + payload_content_type=con_type) + key = keymgr_key.SymmetricKey(secret.algorithm, secret_data) + copy_uuid = self.store_key(ctxt, key, secret.expiration, + secret.name, con_type, + 'base64', + secret.algorithm, secret.bit_length, + secret.mode, True) + return copy_uuid + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error copying key: %s"), e) + + def _create_secret_ref(self, key_id): + """Creates the URL required for accessing a secret. + + :param key_id: the UUID of the key to copy + + :return: the URL of the requested secret + """ + if not key_id: + msg = "Key ID is None" + raise exception.KeyManagerError(msg) + return self._base_url + "/secrets/" + key_id + + def _retrieve_secret_uuid(self, secret_ref): + """Retrieves the UUID of the secret from the secret_ref. + + :param secret_ref: the href of the secret + + :return: the UUID of the secret + """ + + # The secret_ref is assumed to be of a form similar to + # http://host:9311/v1/secrets/d152fa13-2b41-42ca-a934-6c21566c0f40 + # with the UUID at the end. This command retrieves everything + # after the last '/', which is the UUID. + return secret_ref.rpartition('/')[2] + + def _get_secret_data(self, + secret, + payload_content_type='application/octet-stream'): + """Retrieves the secret data given a secret and content_type. + + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param secret: the secret from barbican with the payload of data + :param payload_content_type: the format/type of the secret data + + :returns: the secret data + :raises Exception: if data cannot be retrieved + """ + try: + generated_data = secret.payload + if payload_content_type == 'application/octet-stream': + secret_data = base64.b64encode(generated_data) + else: + secret_data = generated_data + return secret_data + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error getting secret data: %s"), e) + + def _get_secret(self, ctxt, key_id): + """Returns the metadata of the secret. + + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param key_id: UUID of the secret + + :return: the secret's metadata + :raises Exception: if there is an error retrieving the data + """ + + barbican_client = self._get_barbican_client(ctxt) + + try: + secret_ref = self._create_secret_ref(key_id) + return barbican_client.secrets.get(secret_ref) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error getting secret metadata: %s"), e) + + def get_key(self, ctxt, key_id, + payload_content_type='application/octet-stream'): + """Retrieves the specified key. 
+ + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param key_id: the UUID of the key to retrieve + :param payload_content_type: The format/type of the secret data + + :return: SymmetricKey representation of the key + :raises Exception: if key retrieval fails + """ + try: + secret = self._get_secret(ctxt, key_id) + secret_data = self._get_secret_data(secret, + payload_content_type) + if payload_content_type == 'application/octet-stream': + # convert decoded string to list of unsigned ints for each byte + key_data = array.array('B', + base64.b64decode(secret_data)).tolist() + else: + key_data = secret_data + key = keymgr_key.SymmetricKey(secret.algorithm, key_data) + return key + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error getting key: %s"), e) + + def delete_key(self, ctxt, key_id): + """Deletes the specified key. + + :param ctxt: contains information of the user and the environment for + the request (nova/context.py) + :param key_id: the UUID of the key to delete + :raises Exception: if key deletion fails + """ + barbican_client = self._get_barbican_client(ctxt) + + try: + secret_ref = self._create_secret_ref(key_id) + barbican_client.secrets.delete(secret_ref) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Error deleting key: %s"), e) diff --git a/nova/pci/manager.py b/nova/pci/manager.py index e6c17db3e7..22d3960d53 100644 --- a/nova/pci/manager.py +++ b/nova/pci/manager.py @@ -25,6 +25,7 @@ from nova import objects from nova.openstack.common import log as logging from nova.pci import device from nova.pci import stats +from nova.virt import hardware LOG = logging.getLogger(__name__) @@ -155,11 +156,23 @@ class PciDevTracker(object): context, instance) if not pci_requests.requests: return None - devs = self.stats.consume_requests(pci_requests.requests) + instance_numa_topology = hardware.instance_topology_from_instance( + instance) + instance_cells = None + if instance_numa_topology: + instance_cells = instance_numa_topology.cells + + devs = self.stats.consume_requests(pci_requests.requests, + instance_cells) if not devs: raise exception.PciDeviceRequestFailed(pci_requests) for dev in devs: device.claim(dev, instance) + if instance_numa_topology and any( + dev.numa_node is None for dev in devs): + LOG.warning(_LW("Assigning a pci device without numa affinity to " + "instance %(instance)s which has numa topology"), + {'instance': instance['uuid']}) return devs def _allocate_instance(self, instance, devs): diff --git a/nova/pci/stats.py b/nova/pci/stats.py index 03fe60566a..41737812f1 100644 --- a/nova/pci/stats.py +++ b/nova/pci/stats.py @@ -52,7 +52,7 @@ class PciDeviceStats(object): This summary information will be helpful for cloud management also. """ - pool_keys = ['product_id', 'vendor_id'] + pool_keys = ['product_id', 'vendor_id', 'numa_node'] def __init__(self, stats=None): super(PciDeviceStats, self).__init__() @@ -135,7 +135,7 @@ class PciDeviceStats(object): free_devs.extend(pool['devices']) return free_devs - def consume_requests(self, pci_requests): + def consume_requests(self, pci_requests, numa_cells=None): alloc_devices = [] for request in pci_requests: count = request.count @@ -143,6 +143,8 @@ class PciDeviceStats(object): # For now, keep the same algorithm as during scheduling: # a spec may be able to match multiple pools.
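+ # e.g. given pools [{'vendor_id': 'v1', 'numa_node': 0, 'count': 2},
+ # {'vendor_id': 'v1', 'numa_node': 1, 'count': 1}] and an instance
+ # confined to NUMA cell 1, the numa_cells filter below keeps only
+ # the second pool, plus any pool whose numa_node is None.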
pools = self._filter_pools_for_spec(self.pools, spec) + if numa_cells: + pools = self._filter_pools_for_numa_cells(pools, numa_cells) # Failed to allocate the required number of devices # Return the devices already allocated back to their pools if sum([pool['count'] for pool in pools]) < count: @@ -176,9 +178,24 @@ class PciDeviceStats(object): return [pool for pool in pools if utils.pci_device_prop_match(pool, request_specs)] - def _apply_request(self, pools, request): + @staticmethod + def _filter_pools_for_numa_cells(pools, numa_cells): + # Some systems don't report numa node info for pci devices; in + # that case pci_device.numa_node is None. By adding None to + # numa_cells we still allow assigning those devices to instances + # with a numa topology + numa_cells = [None] + [cell.id for cell in numa_cells] + # filter out pools whose numa_node is not included in numa_cells + return [pool for pool in pools if any(utils.pci_device_prop_match( + pool, [{'numa_node': cell}]) + for cell in numa_cells)] + + def _apply_request(self, pools, request, numa_cells=None): count = request.count matching_pools = self._filter_pools_for_spec(pools, request.spec) + if numa_cells: + matching_pools = self._filter_pools_for_numa_cells(matching_pools, + numa_cells) if sum([pool['count'] for pool in matching_pools]) < count: return False else: @@ -188,25 +205,31 @@ class PciDeviceStats(object): break return True - def support_requests(self, requests): + def support_requests(self, requests, numa_cells=None): """Check if the pci requests can be met. Scheduler checks compute node's PCI stats to decide if an instance can be scheduled into the node. Support does not mean real allocation. + If numa_cells is provided then only devices contained in + those nodes are considered. """ # note (yjiang5): this function has high possibility to fail, # so no exception should be triggered for performance reason. pools = copy.deepcopy(self.pools) - return all([self._apply_request(pools, r) for r in requests]) + return all([self._apply_request(pools, r, numa_cells) + for r in requests]) - def apply_requests(self, requests): + def apply_requests(self, requests, numa_cells=None): """Apply PCI requests to the PCI stats. This is used in multiple instance creation, when the scheduler has to maintain how the resources are consumed by the instances. + If numa_cells is provided then only devices contained in + those nodes are considered.
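+ + Raises PciDeviceRequestFailed if any request cannot be satisfied.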
""" - if not all([self._apply_request(self.pools, r) for r in requests]): + if not all([self._apply_request(self.pools, r, numa_cells) + for r in requests]): raise exception.PciDeviceRequestFailed(requests=requests) @staticmethod diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py index 1721f01a87..e0d2fe8d59 100644 --- a/nova/scheduler/filters/numa_topology_filter.py +++ b/nova/scheduler/filters/numa_topology_filter.py @@ -31,6 +31,9 @@ class NUMATopologyFilter(filters.BaseHostFilter): requested_topology = hardware.instance_topology_from_instance(instance) host_topology, _fmt = hardware.host_topology_and_format_from_host( host_state) + pci_requests = filter_properties.get('pci_requests') + if pci_requests: + pci_requests = pci_requests.requests if requested_topology and host_topology: limit_cells = [] for cell in host_topology.cells: @@ -42,7 +45,9 @@ class NUMATopologyFilter(filters.BaseHostFilter): limits = hardware.VirtNUMALimitTopology(cells=limit_cells) instance_topology = (hardware.numa_fit_instance_to_host( host_topology, requested_topology, - limits_topology=limits)) + limits_topology=limits, + pci_requests=pci_requests, + pci_stats=host_state.pci_stats)) if not instance_topology: return False host_state.limits['numa_topology'] = limits.to_json() diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index 5f81fca642..65e2ff907f 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -243,13 +243,20 @@ class HostState(object): # Track number of instances on host self.num_instances += 1 + instance_numa_topology = hardware.instance_topology_from_instance( + instance) + instance_cells = None + if instance_numa_topology: + instance_cells = instance_numa_topology.cells + pci_requests = instance.get('pci_requests') # NOTE(danms): Instance here is still a dict, which is converted from # an object. Thus, it has a .pci_requests field, which gets converted # to a primitive early on, and is thus a dict here. Convert this when # we get an object all the way to this path. 
if pci_requests and pci_requests['requests'] and self.pci_stats: - self.pci_stats.apply_requests(pci_requests.requests) + self.pci_stats.apply_requests(pci_requests.requests, + instance_cells) # Calculate the numa usage updated_numa_topology = hardware.get_host_numa_usage_from_instance( diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py index cdcde67f8e..7eac61bec4 100644 --- a/nova/tests/unit/compute/test_claims.py +++ b/nova/tests/unit/compute/test_claims.py @@ -192,6 +192,7 @@ class ClaimTestCase(test.NoDBTestCase): 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', + 'numa_node': 0, 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker.set_hvdevs([dev_dict]) @@ -209,6 +210,7 @@ class ClaimTestCase(test.NoDBTestCase): 'address': 'a', 'product_id': 'p', 'vendor_id': 'v1', + 'numa_node': 1, 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker.set_hvdevs([dev_dict]) @@ -226,6 +228,7 @@ class ClaimTestCase(test.NoDBTestCase): 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', + 'numa_node': 0, 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker.set_hvdevs([dev_dict]) @@ -269,6 +272,83 @@ class ClaimTestCase(test.NoDBTestCase): self._claim(limits={'numa_topology': limit_topo.to_json()}, numa_topology=huge_instance) + @pci_fakes.patch_pci_whitelist + def test_numa_topology_with_pci(self, mock_get): + dev_dict = { + 'compute_node_id': 1, + 'address': 'a', + 'product_id': 'p', + 'vendor_id': 'v', + 'numa_node': 1, + 'status': 'available'} + self.tracker.new_pci_tracker() + self.tracker.pci_tracker.set_hvdevs([dev_dict]) + request = objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': 'v', 'product_id': 'p'}]) + mock_get.return_value = objects.InstancePCIRequests( + requests=[request]) + + huge_instance = objects.InstanceNUMATopology( + cells=[objects.InstanceNUMACell( + id=1, cpuset=set([1, 2]), memory=512)]) + + self._claim(numa_topology= huge_instance) + + @pci_fakes.patch_pci_whitelist + def test_numa_topology_with_pci_fail(self, mock_get): + dev_dict = { + 'compute_node_id': 1, + 'address': 'a', + 'product_id': 'p', + 'vendor_id': 'v', + 'numa_node': 1, + 'status': 'available'} + dev_dict2 = { + 'compute_node_id': 1, + 'address': 'a', + 'product_id': 'p', + 'vendor_id': 'v', + 'numa_node': 2, + 'status': 'available'} + self.tracker.new_pci_tracker() + self.tracker.pci_tracker.set_hvdevs([dev_dict, dev_dict2]) + + request = objects.InstancePCIRequest(count=2, + spec=[{'vendor_id': 'v', 'product_id': 'p'}]) + mock_get.return_value = objects.InstancePCIRequests( + requests=[request]) + + huge_instance = objects.InstanceNUMATopology( + cells=[objects.InstanceNUMACell( + id=1, cpuset=set([1, 2]), memory=512)]) + + self.assertRaises(exception.ComputeResourcesUnavailable, + self._claim, + numa_topology=huge_instance) + + @pci_fakes.patch_pci_whitelist + def test_numa_topology_with_pci_no_numa_info(self, mock_get): + dev_dict = { + 'compute_node_id': 1, + 'address': 'a', + 'product_id': 'p', + 'vendor_id': 'v', + 'numa_node': None, + 'status': 'available'} + self.tracker.new_pci_tracker() + self.tracker.pci_tracker.set_hvdevs([dev_dict]) + + request = objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': 'v', 'product_id': 'p'}]) + mock_get.return_value = objects.InstancePCIRequests( + requests=[request]) + + huge_instance = objects.InstanceNUMATopology( + cells=[objects.InstanceNUMACell( + id=1, cpuset=set([1, 2]), memory=512)]) + + self._claim(numa_topology= 
huge_instance) + def test_abort(self, mock_get): claim = self._abort() self.assertTrue(claim.tracker.icalled) diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index bcfaae85b1..a75ddaf260 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -285,6 +285,7 @@ class BaseTestCase(test.TestCase): inst = objects.Instance(context=self.context) inst.vm_state = vm_states.ACTIVE inst.task_state = None + inst.power_state = power_state.RUNNING inst.image_ref = FAKE_IMAGE_REF inst.reservation_id = 'r-fakeres' inst.user_id = self.user_id @@ -3988,6 +3989,13 @@ class ComputeTestCase(BaseTestCase): self.assertTrue(self.tokens_deleted) + def test_delete_instance_changes_power_state(self): + """Test that the power state is NOSTATE after deleting an instance.""" + instance = self._create_fake_instance_obj() + self.compute._delete_instance(self.context, instance, [], + self.none_quotas) + self.assertEqual(power_state.NOSTATE, instance.power_state) + def test_instance_termination_exception_sets_error(self): """Test that we handle InstanceTerminationFailure which is propagated up from the underlying driver. diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index 3a4eeb820d..855054354e 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -101,7 +101,8 @@ class FakeVirtDriver(driver.ComputeDriver): 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', - 'extra_k1': 'v1' + 'extra_k1': 'v1', + 'numa_node': 1 }, { 'label': 'label_8086_0443', @@ -111,7 +112,8 @@ class FakeVirtDriver(driver.ComputeDriver): 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', - 'extra_k1': 'v1' + 'extra_k1': 'v1', + 'numa_node': 1 }, { 'label': 'label_8086_0443', @@ -121,7 +123,8 @@ class FakeVirtDriver(driver.ComputeDriver): 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', - 'extra_k1': 'v1' + 'extra_k1': 'v1', + 'numa_node': 1 }, { 'label': 'label_8086_0123', @@ -131,7 +134,8 @@ class FakeVirtDriver(driver.ComputeDriver): 'product_id': '0123', 'vendor_id': '8086', 'status': 'available', - 'extra_k1': 'v1' + 'extra_k1': 'v1', + 'numa_node': 1 }, { 'label': 'label_8086_7891', @@ -141,19 +145,22 @@ class FakeVirtDriver(driver.ComputeDriver): 'product_id': '7891', 'vendor_id': '8086', 'status': 'available', - 'extra_k1': 'v1' + 'extra_k1': 'v1', + 'numa_node': None }, ] if self.pci_support else [] self.pci_stats = [ { 'count': 2, 'vendor_id': '8086', - 'product_id': '0443' + 'product_id': '0443', + 'numa_node': 1 }, { 'count': 1, 'vendor_id': '8086', - 'product_id': '7891' + 'product_id': '7891', + 'numa_node': None }, ] if self.pci_support else [] if stats is not None: diff --git a/nova/tests/unit/keymgr/test_barbican.py b/nova/tests/unit/keymgr/test_barbican.py new file mode 100644 index 0000000000..36901f3660 --- /dev/null +++ b/nova/tests/unit/keymgr/test_barbican.py @@ -0,0 +1,223 @@ +# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the barbican key manager. +""" + +import array +import binascii + +import mock + +from nova import exception +from nova.keymgr import barbican +from nova.keymgr import key as keymgr_key +from nova.tests.unit.keymgr import test_key_mgr + + +class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): + + def _create_key_manager(self): + return barbican.BarbicanKeyManager() + + def setUp(self): + super(BarbicanKeyManagerTestCase, self).setUp() + + # Create fake auth_token + self.ctxt = mock.Mock() + self.ctxt.auth_token = "fake_token" + + # Create mock barbican client + self._build_mock_barbican() + + # Create a key_id, secret_ref, pre_hex, and hex to use + self.key_id = "d152fa13-2b41-42ca-a934-6c21566c0f40" + self.secret_ref = ("http://host:9311/v1/secrets/" + self.key_id) + self.pre_hex = "AIDxQp2++uAbKaTVDMXFYIu8PIugJGqkK0JLqkU0rhY=" + self.hex = ("0080f1429dbefae01b29a4d50cc5c5608bbc3c8ba0246aa42b424baa4" + "534ae16") + self.key_mgr._base_url = "http://host:9311/v1" + self.addCleanup(self._restore) + + def _restore(self): + if hasattr(self, 'original_key'): + keymgr_key.SymmetricKey = self.original_key + + def _build_mock_barbican(self): + self.mock_barbican = mock.MagicMock(name='mock_barbican') + + # Set commonly used methods + self.get = self.mock_barbican.secrets.get + self.delete = self.mock_barbican.secrets.delete + self.store = self.mock_barbican.secrets.store + self.create = self.mock_barbican.secrets.create + + self.key_mgr._barbican_client = self.mock_barbican + + def _build_mock_symKey(self): + self.mock_symKey = mock.Mock() + + def fake_sym_key(alg, key): + self.mock_symKey.get_encoded.return_value = key + self.mock_symKey.get_algorithm.return_value = alg + return self.mock_symKey + self.original_key = keymgr_key.SymmetricKey + keymgr_key.SymmetricKey = fake_sym_key + + def test_copy_key(self): + # Create metadata for original secret + original_secret_metadata = mock.Mock() + original_secret_metadata.algorithm = mock.sentinel.alg + original_secret_metadata.bit_length = mock.sentinel.bit + original_secret_metadata.name = mock.sentinel.name + original_secret_metadata.expiration = mock.sentinel.expiration + original_secret_metadata.mode = mock.sentinel.mode + content_types = {'default': 'fake_type'} + original_secret_metadata.content_types = content_types + original_secret_data = mock.Mock() + original_secret_metadata.payload = original_secret_data + + # Create href for copied secret + copied_secret = mock.Mock() + copied_secret.store.return_value = 'http://test/uuid' + + # Set get and create return values + self.get.return_value = original_secret_metadata + self.create.return_value = copied_secret + + # Create the mock key + self._build_mock_symKey() + + # Copy the original + self.key_mgr.copy_key(self.ctxt, self.key_id) + + # Assert proper methods were called + self.get.assert_called_once_with(self.secret_ref) + self.create.assert_called_once_with( + mock.sentinel.name, + self.mock_symKey.get_encoded(), + content_types['default'], + 'base64', + mock.sentinel.alg, + mock.sentinel.bit, + mock.sentinel.mode, + mock.sentinel.expiration) + 
copied_secret.store.assert_called_once_with() + + def test_copy_null_context(self): + self.key_mgr._barbican_client = None + self.assertRaises(exception.Forbidden, + self.key_mgr.copy_key, None, self.key_id) + + def test_create_key(self): + # Create order_ref_url and assign return value + order_ref_url = ("http://localhost:9311/v1/None/orders/" + "4fe939b7-72bc-49aa-bd1e-e979589858af") + key_order = mock.Mock() + self.mock_barbican.orders.create_key.return_value = key_order + key_order.submit.return_value = order_ref_url + + # Create order and assign return value + order = mock.Mock() + order.secret_ref = self.secret_ref + self.mock_barbican.orders.get.return_value = order + + # Create the key, get the UUID + returned_uuid = self.key_mgr.create_key(self.ctxt) + + self.mock_barbican.orders.get.assert_called_once_with(order_ref_url) + self.assertEqual(returned_uuid, self.key_id) + + def test_create_null_context(self): + self.key_mgr._barbican_client = None + self.assertRaises(exception.Forbidden, + self.key_mgr.create_key, None) + + def test_delete_null_context(self): + self.key_mgr._barbican_client = None + self.assertRaises(exception.Forbidden, + self.key_mgr.delete_key, None, self.key_id) + + def test_delete_key(self): + self.key_mgr.delete_key(self.ctxt, self.key_id) + self.delete.assert_called_once_with(self.secret_ref) + + def test_delete_unknown_key(self): + self.assertRaises(exception.KeyManagerError, + self.key_mgr.delete_key, self.ctxt, None) + + @mock.patch('base64.b64encode') + def test_get_key(self, b64_mock): + b64_mock.return_value = self.pre_hex + content_type = 'application/octet-stream' + + key = self.key_mgr.get_key(self.ctxt, self.key_id, content_type) + + self.get.assert_called_once_with(self.secret_ref) + encoded = array.array('B', binascii.unhexlify(self.hex)).tolist() + self.assertEqual(key.get_encoded(), encoded) + + def test_get_null_context(self): + self.key_mgr._barbican_client = None + self.assertRaises(exception.Forbidden, + self.key_mgr.get_key, None, self.key_id) + + def test_get_unknown_key(self): + self.assertRaises(exception.KeyManagerError, + self.key_mgr.get_key, self.ctxt, None) + + def test_store_key_base64(self): + # Create Key to store + secret_key = array.array('B', [0x01, 0x02, 0xA0, 0xB3]).tolist() + _key = keymgr_key.SymmetricKey('AES', secret_key) + + # Define the return values + secret = mock.Mock() + self.create.return_value = secret + secret.store.return_value = self.secret_ref + + # Store the Key + returned_uuid = self.key_mgr.store_key(self.ctxt, _key, bit_length=32) + + self.create.assert_called_once_with('Nova Compute Key', + 'AQKgsw==', + 'application/octet-stream', + 'base64', + 'AES', 32, 'CBC', + None) + self.assertEqual(returned_uuid, self.key_id) + + def test_store_key_plaintext(self): + # Create the plaintext key + secret_key_text = "This is a test text key." 
+ _key = keymgr_key.SymmetricKey('AES', secret_key_text) + + # Store the Key + self.key_mgr.store_key(self.ctxt, _key, + payload_content_type='text/plain', + payload_content_encoding=None) + self.create.assert_called_once_with('Nova Compute Key', + secret_key_text, + 'text/plain', + None, + 'AES', 256, 'CBC', + None) + self.assertEqual(self.store.call_count, 0) + + def test_store_null_context(self): + self.key_mgr._barbican_client = None + self.assertRaises(exception.Forbidden, + self.key_mgr.store_key, None, None) diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py index 08cf42e82c..c612a1105f 100644 --- a/nova/tests/unit/pci/test_manager.py +++ b/nova/tests/unit/pci/test_manager.py @@ -36,7 +36,8 @@ fake_pci = { 'product_id': 'p', 'vendor_id': 'v', 'request_id': None, - 'status': 'available'} + 'status': 'available', + 'numa_node': 0} fake_pci_1 = dict(fake_pci, address='0000:00:00.2', product_id='p1', vendor_id='v1') fake_pci_2 = dict(fake_pci, address='0000:00:00.3') @@ -63,8 +64,10 @@ fake_db_dev = { } fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1', product_id='p1', id=2, - address='0000:00:00.2') -fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3') + address='0000:00:00.2', + numa_node=0) +fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3', + numa_node=None) fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2] @@ -82,6 +85,7 @@ class PciDevTrackerTestCase(test.TestCase): self.inst.pci_devices = objects.PciDeviceList() self.inst.vm_state = vm_states.ACTIVE self.inst.task_state = None + self.inst.numa_topology = None def _fake_get_pci_devices(self, ctxt, node_id): return fake_db_devs[:] @@ -119,7 +123,7 @@ class PciDevTrackerTestCase(test.TestCase): free_devs = self.tracker.pci_stats.get_free_devs() self.assertEqual(len(free_devs), 3) self.assertEqual(self.tracker.stale.keys(), []) - self.assertEqual(len(self.tracker.stats.pools), 2) + self.assertEqual(len(self.tracker.stats.pools), 3) self.assertEqual(self.tracker.node_id, 1) def test_pcidev_tracker_create_no_nodeid(self): @@ -186,6 +190,36 @@ class PciDevTrackerTestCase(test.TestCase): self.inst) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') + def test_update_pci_for_instance_with_numa(self, mock_get): + fake_db_dev_3 = dict(fake_db_dev_1, id=4, address='0000:00:00.4') + fake_devs_numa = copy.deepcopy(fake_db_devs) + fake_devs_numa.append(fake_db_dev_3) + self.tracker = manager.PciDevTracker(1) + self.tracker.set_hvdevs(fake_devs_numa) + pci_requests = copy.deepcopy(fake_pci_requests)[:1] + pci_requests[0]['count'] = 2 + self._create_pci_requests_object(mock_get, pci_requests) + self.inst.numa_topology = objects.InstanceNUMATopology( + cells=[objects.InstanceNUMACell( + id=1, cpuset=set([1, 2]), memory=512)]) + self.tracker.update_pci_for_instance(None, self.inst) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(2, len(free_devs)) + self.assertEqual('v1', free_devs[0]['vendor_id']) + self.assertEqual('v1', free_devs[1]['vendor_id']) + + @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') + def test_update_pci_for_instance_with_numa_fail(self, mock_get): + self._create_pci_requests_object(mock_get, fake_pci_requests) + self.inst.numa_topology = objects.InstanceNUMATopology( + cells=[objects.InstanceNUMACell( + id=1, cpuset=set([1, 2]), memory=512)]) + self.assertRaises(exception.PciDeviceRequestFailed, + self.tracker.update_pci_for_instance, + None, + self.inst) + + 
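+ # A simplified sketch of the NUMA cell filtering the two tests above
+ # exercise (the real _filter_pools_for_numa_cells in nova/pci/stats.py
+ # matches pools via utils.pci_device_prop_match):
+ #
+ # def _filter_pools_for_numa_cells(pools, numa_cells):
+ # allowed = [None] + [cell.id for cell in numa_cells]
+ # return [p for p in pools if p.get('numa_node') in allowed]
+ #
+ # Devices on NUMA nodes outside the instance's cells are filtered out,
+ # while devices that report no NUMA node stay eligible, which is why
+ # the second test cannot satisfy its request and raises
+ # PciDeviceRequestFailed.
+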
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_update_pci_for_instance_deleted(self, mock_get): self._create_pci_requests_object(mock_get, fake_pci_requests) self.tracker.update_pci_for_instance(None, self.inst) diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py index 6960cf93cf..840a4e8c8e 100644 --- a/nova/tests/unit/pci/test_stats.py +++ b/nova/tests/unit/pci/test_stats.py @@ -22,7 +22,7 @@ from nova.pci import stats from nova.pci import whitelist from nova import test from nova.tests.unit.pci import fakes - +from nova.virt import hardware fake_pci_1 = { 'compute_node_id': 1, 'address': '0000:00:00.1', @@ -31,16 +31,22 @@ fake_pci_1 = { 'status': 'available', 'extra_k1': 'v1', 'request_id': None, + 'numa_node': 0, } fake_pci_2 = dict(fake_pci_1, vendor_id='v2', product_id='p2', - address='0000:00:00.2') + address='0000:00:00.2', + numa_node=1) fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3') +fake_pci_4 = dict(fake_pci_1, vendor_id='v3', + product_id='p3', + address='0000:00:00.3', + numa_node= None) pci_requests = [objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v1'}]), @@ -59,9 +65,11 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): self.fake_dev_1 = objects.PciDevice.create(fake_pci_1) self.fake_dev_2 = objects.PciDevice.create(fake_pci_2) self.fake_dev_3 = objects.PciDevice.create(fake_pci_3) + self.fake_dev_4 = objects.PciDevice.create(fake_pci_4) map(self.pci_stats.add_device, - [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3]) + [self.fake_dev_1, self.fake_dev_2, + self.fake_dev_3, self.fake_dev_4]) def setUp(self): super(PciDeviceStatsTestCase, self).setUp() @@ -72,15 +80,15 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): self._create_fake_devs() def test_add_device(self): - self.assertEqual(len(self.pci_stats.pools), 2) + self.assertEqual(len(self.pci_stats.pools), 3) self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]), - set(['v1', 'v2'])) + set(['v1', 'v2', 'v3'])) self.assertEqual(set([d['count'] for d in self.pci_stats]), set([1, 2])) def test_remove_device(self): self.pci_stats.remove_device(self.fake_dev_2) - self.assertEqual(len(self.pci_stats.pools), 1) + self.assertEqual(len(self.pci_stats.pools), 2) self.assertEqual(self.pci_stats.pools[0]['count'], 2) self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1') @@ -94,29 +102,29 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): m = jsonutils.dumps(self.pci_stats) new_stats = stats.PciDeviceStats(m) - self.assertEqual(len(new_stats.pools), 2) + self.assertEqual(len(new_stats.pools), 3) self.assertEqual(set([d['count'] for d in new_stats]), set([1, 2])) self.assertEqual(set([d['vendor_id'] for d in new_stats]), - set(['v1', 'v2'])) + set(['v1', 'v2', 'v3'])) def test_support_requests(self): self.assertEqual(self.pci_stats.support_requests(pci_requests), True) - self.assertEqual(len(self.pci_stats.pools), 2) + self.assertEqual(len(self.pci_stats.pools), 3) self.assertEqual(set([d['count'] for d in self.pci_stats]), set((1, 2))) def test_support_requests_failed(self): self.assertEqual( self.pci_stats.support_requests(pci_requests_multiple), False) - self.assertEqual(len(self.pci_stats.pools), 2) + self.assertEqual(len(self.pci_stats.pools), 3) self.assertEqual(set([d['count'] for d in self.pci_stats]), set([1, 2])) def test_apply_requests(self): self.pci_stats.apply_requests(pci_requests) - self.assertEqual(len(self.pci_stats.pools), 1) + self.assertEqual(len(self.pci_stats.pools), 2) 
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1') self.assertEqual(self.pci_stats.pools[0]['count'], 1) @@ -140,6 +148,47 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): self.pci_stats.consume_requests, pci_requests_multiple) + def test_support_requests_numa(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None), + hardware.VirtNUMATopologyCell(1, None, None)] + self.assertEqual(True, self.pci_stats.support_requests( + pci_requests, cells)) + + def test_support_requests_numa_failed(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None)] + self.assertEqual(False, self.pci_stats.support_requests( + pci_requests, cells)) + + def test_support_requests_no_numa_info(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None)] + pci_request = [objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': 'v3'}])] + self.assertEqual(True, self.pci_stats.support_requests( + pci_request, cells)) + + def test_consume_requests_numa(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None), + hardware.VirtNUMATopologyCell(1, None, None)] + devs = self.pci_stats.consume_requests(pci_requests, cells) + self.assertEqual(2, len(devs)) + self.assertEqual(set(['v1', 'v2']), + set([dev['vendor_id'] for dev in devs])) + + def test_consume_requests_numa_failed(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None)] + self.assertRaises(exception.PciDeviceRequestFailed, + self.pci_stats.consume_requests, + pci_requests, cells) + + def test_consume_requests_no_numa_info(self): + cells = [hardware.VirtNUMATopologyCell(0, None, None)] + pci_request = [objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': 'v3'}])] + devs = self.pci_stats.consume_requests(pci_request, cells) + self.assertEqual(1, len(devs)) + self.assertEqual(set(['v3']), + set([dev['vendor_id'] for dev in devs])) + @mock.patch.object(whitelist, 'get_pci_devices_filter') class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase): @@ -163,7 +212,8 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase): 'vendor_id': '1137', 'product_id': '0071', 'status': 'available', - 'request_id': None} + 'request_id': None, + 'numa_node': 0} self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev)) self.pci_untagged_devices = [] @@ -173,7 +223,8 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase): 'vendor_id': '1137', 'product_id': '0072', 'status': 'available', - 'request_id': None} + 'request_id': None, + 'numa_node': 0} self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev)) map(self.pci_stats.add_device, self.pci_tagged_devices) diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py index 58b4148374..17a2948d78 100644 --- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py +++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py @@ -40,7 +40,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} host = fakes.FakeHostState('host1', 'node1', - {'numa_topology': fakes.NUMA_TOPOLOGY}) + {'numa_topology': fakes.NUMA_TOPOLOGY, + 'pci_stats': None}) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_numa_topology_filter_numa_instance_no_numa_host_fail(self): @@ -55,7 +56,7 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'request_spec': { 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} - host = fakes.FakeHostState('host1', 'node1', 
{}) + host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None}) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_numa_topology_filter_numa_host_no_numa_instance_pass(self): @@ -82,7 +83,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} host = fakes.FakeHostState('host1', 'node1', - {'numa_topology': fakes.NUMA_TOPOLOGY}) + {'numa_topology': fakes.NUMA_TOPOLOGY, + 'pci_stats': None}) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_numa_topology_filter_fail_memory(self): @@ -100,7 +102,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} host = fakes.FakeHostState('host1', 'node1', - {'numa_topology': fakes.NUMA_TOPOLOGY}) + {'numa_topology': fakes.NUMA_TOPOLOGY, + 'pci_stats': None}) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_numa_topology_filter_fail_cpu(self): @@ -117,7 +120,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} host = fakes.FakeHostState('host1', 'node1', - {'numa_topology': fakes.NUMA_TOPOLOGY}) + {'numa_topology': fakes.NUMA_TOPOLOGY, + 'pci_stats': None}) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_numa_topology_filter_pass_set_limit(self): @@ -135,7 +139,8 @@ class TestNUMATopologyFilter(test.NoDBTestCase): 'instance_properties': jsonutils.to_primitive( obj_base.obj_to_primitive(instance))}} host = fakes.FakeHostState('host1', 'node1', - {'numa_topology': fakes.NUMA_TOPOLOGY}) + {'numa_topology': fakes.NUMA_TOPOLOGY, + 'pci_stats': None}) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) limits_topology = hardware.VirtNUMALimitTopology.from_json( host.limits['numa_topology']) diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py index d5b0315228..c45f86a7f8 100644 --- a/nova/tests/unit/scheduler/test_host_manager.py +++ b/nova/tests/unit/scheduler/test_host_manager.py @@ -492,7 +492,7 @@ class HostStateTestCase(test.NoDBTestCase): instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0, project_id='12345', vm_state=vm_states.BUILDING, task_state=task_states.SCHEDULING, os_type='Linux', - uuid='fake-uuid') + uuid='fake-uuid', numa_topology=None) host.consume_from_instance(instance) numa_usage_mock.assert_called_once_with(host, instance) self.assertEqual('fake-consumed-once', host.numa_topology) @@ -501,7 +501,7 @@ class HostStateTestCase(test.NoDBTestCase): instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0, project_id='12345', vm_state=vm_states.PAUSED, task_state=None, os_type='Linux', - uuid='fake-uuid') + uuid='fake-uuid', numa_topology=None) host.consume_from_instance(instance) self.assertEqual(2, host.num_instances) diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py index 86b97c2bea..77e34871a8 100644 --- a/nova/tests/unit/virt/libvirt/test_config.py +++ b/nova/tests/unit/virt/libvirt/test_config.py @@ -901,6 +901,35 @@ class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest): <target dir="/mnt"/> </filesystem>""") + def test_config_block(self): + obj = config.LibvirtConfigGuestFilesys() + obj.source_type = "block" + obj.source_dev = "/dev/sdb" + obj.target_dir = "/mnt" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + 
<filesystem type="block"> + <source dev="/dev/sdb"/> + <target dir="/mnt"/> + </filesystem>""") + + def test_config_file(self): + obj = config.LibvirtConfigGuestFilesys() + obj.source_type = "file" + obj.source_file = "/data/myimage.qcow2" + obj.driver_type = "nbd" + obj.driver_format = "qcow2" + obj.target_dir = "/mnt" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + <filesystem type="file"> + <driver format="qcow2" type="nbd"/> + <source file="/data/myimage.qcow2"/> + <target dir="/mnt"/> + </filesystem>""") + class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest): diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 7b60d6d2fa..6e03a8f3a4 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -1183,6 +1183,144 @@ class LibvirtConnTestCase(test.NoDBTestCase): 'something', 'something')) @mock.patch.object(objects.Flavor, 'get_by_id') + def test_get_guest_config_numa_host_instance_1pci_fits(self, mock_flavor): + instance_ref = objects.Instance(**self.test_instance) + image_meta = {} + flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, + ephemeral_gb=8128, swap=33550336, name='fake', + extra_specs={}) + mock_flavor.return_value = flavor + + caps = vconfig.LibvirtConfigCaps() + caps.host = vconfig.LibvirtConfigCapsHost() + caps.host.cpu = vconfig.LibvirtConfigCPU() + caps.host.cpu.arch = "x86_64" + caps.host.topology = self._fake_caps_numa_topology() + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref, + image_meta) + pci_device_info = dict(test_pci_device.fake_db_dev) + pci_device_info.update(compute_node_id=1, + label='fake', + status='available', + address='0000:00:00.1', + instance_uuid=None, + request_id=None, + extra_info={}, + numa_node=1) + pci_device = objects.PciDevice(**pci_device_info) + + with contextlib.nested( + mock.patch.object(host.Host, 'has_min_version', + return_value=True), + mock.patch.object( + host.Host, "get_capabilities", return_value=caps), + mock.patch.object( + random, 'choice', side_effect=lambda cells: cells[0]), + mock.patch.object(pci_manager, "get_instance_pci_devs", + return_value=[pci_device])): + cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) + self.assertIsNone(instance_ref.numa_topology) + self.assertEqual(set([2, 3]), cfg.cpuset) + self.assertEqual(0, len(cfg.cputune.vcpupin)) + self.assertIsNone(cfg.cpu.numa) + + @mock.patch.object(objects.Flavor, 'get_by_id') + def test_get_guest_config_numa_host_instance_pci_no_numa_info(self, + mock_flavor): + instance_ref = objects.Instance(**self.test_instance) + image_meta = {} + flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, + ephemeral_gb=8128, swap=33550336, name='fake', + extra_specs={}) + mock_flavor.return_value = flavor + + caps = vconfig.LibvirtConfigCaps() + caps.host = vconfig.LibvirtConfigCapsHost() + caps.host.cpu = vconfig.LibvirtConfigCPU() + caps.host.cpu.arch = "x86_64" + caps.host.topology = self._fake_caps_numa_topology() + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref, + image_meta) + pci_device_info = dict(test_pci_device.fake_db_dev) + pci_device_info.update(compute_node_id=1, + label='fake', + status='available', + address='0000:00:00.1', + instance_uuid=None, + request_id=None, + extra_info={}, + numa_node=None) + pci_device = 
objects.PciDevice(**pci_device_info) + + with contextlib.nested( + mock.patch.object(host.Host, 'has_min_version', + return_value=True), + mock.patch.object( + host.Host, "get_capabilities", return_value=caps), + mock.patch.object( + hardware, 'get_vcpu_pin_set', return_value=set([3])), + mock.patch.object(pci_manager, "get_instance_pci_devs", + return_value=[pci_device])): + cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) + self.assertEqual(set([3]), cfg.cpuset) + self.assertEqual(0, len(cfg.cputune.vcpupin)) + self.assertIsNone(cfg.cpu.numa) + + @mock.patch.object(objects.Flavor, 'get_by_id') + def test_get_guest_config_numa_host_instance_2pci_no_fit(self, + mock_flavor): + instance_ref = objects.Instance(**self.test_instance) + image_meta = {} + flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, + ephemeral_gb=8128, swap=33550336, name='fake', + extra_specs={}) + mock_flavor.return_value = flavor + + caps = vconfig.LibvirtConfigCaps() + caps.host = vconfig.LibvirtConfigCapsHost() + caps.host.cpu = vconfig.LibvirtConfigCPU() + caps.host.cpu.arch = "x86_64" + caps.host.topology = self._fake_caps_numa_topology() + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref, + image_meta) + pci_device_info = dict(test_pci_device.fake_db_dev) + pci_device_info.update(compute_node_id=1, + label='fake', + status='available', + address='0000:00:00.1', + instance_uuid=None, + request_id=None, + extra_info={}, + numa_node=1) + pci_device = objects.PciDevice(**pci_device_info) + pci_device_info.update(numa_node=0, address='0000:00:00.2') + pci_device2 = objects.PciDevice(**pci_device_info) + with contextlib.nested( + mock.patch.object( + host.Host, "get_capabilities", return_value=caps), + mock.patch.object( + hardware, 'get_vcpu_pin_set', return_value=set([3])), + mock.patch.object(random, 'choice'), + mock.patch.object(pci_manager, "get_instance_pci_devs", + return_value=[pci_device, pci_device2]) + ) as (get_host_cap_mock, + get_vcpu_pin_set_mock, choice_mock, pci_mock): + cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) + self.assertFalse(choice_mock.called) + self.assertEqual(set([3]), cfg.cpuset) + self.assertEqual(0, len(cfg.cputune.vcpupin)) + self.assertIsNone(cfg.cpu.numa) + + @mock.patch.object(objects.Flavor, 'get_by_id') def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self, mock_flavor): instance_ref = objects.Instance(**self.test_instance) @@ -9850,6 +9988,155 @@ class LibvirtConnTestCase(test.NoDBTestCase): 'version': '1.0'} self.assertEqual(expected, actual.serialize()) + @mock.patch.object(timeutils, 'utcnow') + @mock.patch.object(host.Host, 'get_domain') + def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain, + mock_utcnow): + xml = """ + <domain type='kvm'> + <devices> + <disk type='file'> + <source file='filename'/> + <target dev='vda' bus='virtio'/> + </disk> + <disk type='block'> + <source dev='/path/to/dev/1'/> + <target dev='vdb' bus='virtio'/> + </disk> + <interface type='network'> + <mac address='52:54:00:a4:38:38'/> + <source network='default'/> + <target dev='vnet0'/> + </interface> + <interface type="bridge"> + <mac address="53:55:00:a5:39:39"/> + <model type="virtio"/> + <target dev="br0"/> + </interface> + </devices> + </domain> + """ + + class DiagFakeDomain(FakeVirtDomain): + + def __init__(self): + super(DiagFakeDomain, self).__init__(fake_xml=xml) + + def vcpus(self): + return ([(0, 1, 15340000000L, 0), + (1, 1, 
1640000000L, 0), + (2, 1, 3040000000L, 0), + (3, 1, 1420000000L, 0)], + [(True, False), + (True, False), + (True, False), + (True, False)]) + + def blockStats(self, path): + return (169L, 688640L, 0L, 0L, -1L) + + def interfaceStats(self, path): + return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L) + + def memoryStats(self): + return {'actual': 220160L, 'rss': 200164L} + + def maxMemory(self): + return 280160L + + def fake_get_domain(self): + return DiagFakeDomain() + + mock_get_domain.side_effect = fake_get_domain + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + actual = drvr.get_diagnostics(instance) + expect = {'cpu0_time': 15340000000L, + 'cpu1_time': 1640000000L, + 'cpu2_time': 3040000000L, + 'cpu3_time': 1420000000L, + 'vda_read': 688640L, + 'vda_read_req': 169L, + 'vda_write': 0L, + 'vda_write_req': 0L, + 'vda_errors': -1L, + 'vdb_read': 688640L, + 'vdb_read_req': 169L, + 'vdb_write': 0L, + 'vdb_write_req': 0L, + 'vdb_errors': -1L, + 'memory': 280160L, + 'memory-actual': 220160L, + 'memory-rss': 200164L, + 'vnet0_rx': 4408L, + 'vnet0_rx_drop': 0L, + 'vnet0_rx_errors': 0L, + 'vnet0_rx_packets': 82L, + 'vnet0_tx': 0L, + 'vnet0_tx_drop': 0L, + 'vnet0_tx_errors': 0L, + 'vnet0_tx_packets': 0L, + 'br0_rx': 4408L, + 'br0_rx_drop': 0L, + 'br0_rx_errors': 0L, + 'br0_rx_packets': 82L, + 'br0_tx': 0L, + 'br0_tx_drop': 0L, + 'br0_tx_errors': 0L, + 'br0_tx_packets': 0L, + } + self.assertEqual(actual, expect) + + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + mock_utcnow.return_value = diags_time + + instance.launched_at = lt + actual = drvr.get_instance_diagnostics(instance) + expected = {'config_drive': False, + 'cpu_details': [{'time': 15340000000L}, + {'time': 1640000000L}, + {'time': 3040000000L}, + {'time': 1420000000L}], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}, + {'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [{'mac_address': '52:54:00:a4:38:38', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}, + {'mac_address': '53:55:00:a5:39:39', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}], + 'state': 'running', + 'uptime': 10., + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count(self, mock_list): """Domain can fail to return the vcpu description in case it's @@ -10933,7 +11220,7 @@ Active: 8381604 kB mock.patch.object(drvr, '_set_cache_mode') ) as (volume_save, connect_volume, get_volume_config, set_cache_mode): devices = drvr._get_guest_storage_config(instance, None, - disk_info, False, bdi, flavor) + disk_info, False, bdi, flavor, "hvm") self.assertEqual(3, len(devices)) self.assertEqual('/dev/vdb', instance.default_ephemeral_device) @@ -11234,6 +11521,77 @@ Active: 8381604 kB return_value=1002012): driver.init_host('wibble') + def test_get_guest_config_parallels_vm(self): + self.flags(virt_type='parallels', group='libvirt') + self.flags(images_type='ploop', 
group='libvirt') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = objects.Instance(**self.test_instance) + flavor = instance_ref.get_flavor() + flavor.extra_specs = {} + + image_meta = {} + + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref, + image_meta) + + cfg = drvr._get_guest_config(instance_ref, + _fake_network_info(self.stubs, 1), + None, disk_info, flavor=flavor) + self.assertEqual("parallels", cfg.virt_type) + self.assertEqual(instance_ref["uuid"], cfg.uuid) + self.assertEqual(2 * units.Mi, cfg.memory) + self.assertEqual(1, cfg.vcpus) + self.assertEqual(vm_mode.HVM, cfg.os_type) + self.assertIsNone(cfg.os_root) + self.assertEqual(6, len(cfg.devices)) + self.assertIsInstance(cfg.devices[0], + vconfig.LibvirtConfigGuestDisk) + self.assertEqual(cfg.devices[0].driver_format, "ploop") + self.assertIsInstance(cfg.devices[1], + vconfig.LibvirtConfigGuestDisk) + self.assertIsInstance(cfg.devices[2], + vconfig.LibvirtConfigGuestInterface) + self.assertIsInstance(cfg.devices[3], + vconfig.LibvirtConfigGuestInput) + self.assertIsInstance(cfg.devices[4], + vconfig.LibvirtConfigGuestGraphics) + self.assertIsInstance(cfg.devices[5], + vconfig.LibvirtConfigGuestVideo) + + def test_get_guest_config_parallels_ct(self): + self.flags(virt_type='parallels', group='libvirt') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + ct_instance = self.test_instance.copy() + ct_instance["vm_mode"] = vm_mode.EXE + instance_ref = objects.Instance(**ct_instance) + flavor = instance_ref.get_flavor() + flavor.extra_specs = {} + + cfg = drvr._get_guest_config(instance_ref, + _fake_network_info(self.stubs, 1), + None, {'mapping': {}}, flavor=flavor) + self.assertEqual("parallels", cfg.virt_type) + self.assertEqual(instance_ref["uuid"], cfg.uuid) + self.assertEqual(2 * units.Mi, cfg.memory) + self.assertEqual(1, cfg.vcpus) + self.assertEqual(vm_mode.EXE, cfg.os_type) + self.assertEqual("/sbin/init", cfg.os_init_path) + self.assertIsNone(cfg.os_root) + self.assertEqual(4, len(cfg.devices)) + self.assertIsInstance(cfg.devices[0], + vconfig.LibvirtConfigGuestFilesys) + fs = cfg.devices[0] + self.assertEqual(fs.source_type, "file") + self.assertEqual(fs.driver_type, "ploop") + self.assertEqual(fs.target_dir, "/") + self.assertIsInstance(cfg.devices[1], + vconfig.LibvirtConfigGuestInterface) + self.assertIsInstance(cfg.devices[2], + vconfig.LibvirtConfigGuestGraphics) + self.assertIsInstance(cfg.devices[3], + vconfig.LibvirtConfigGuestVideo) + class HostStateTestCase(test.NoDBTestCase): diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py index 988c76534b..bb91c4d687 100644 --- a/nova/tests/unit/virt/libvirt/test_imagebackend.py +++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py @@ -35,6 +35,7 @@ from nova import test from nova.tests.unit import fake_processutils from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.virt import images +from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import imagebackend from nova.virt.libvirt import rbd_utils @@ -124,6 +125,20 @@ class _ImageTestCase(object): self.assertEqual(fake_processutils.fake_execute_get_log(), []) + def test_libvirt_fs_info(self): + image = self.image_class(self.INSTANCE, self.NAME) + fs = image.libvirt_fs_info("/mnt") + # check that exception hasn't been raised and the method + # returned correct object + self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys) + 
self.assertEqual(fs.target_dir, "/mnt") + if image.is_block_dev: + self.assertEqual(fs.source_type, "block") + self.assertEqual(fs.source_dev, image.path) + else: + self.assertEqual(fs.source_type, "file") + self.assertEqual(fs.source_file, image.path) + class RawTestCase(_ImageTestCase, test.NoDBTestCase): @@ -1249,6 +1264,74 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase): self.assertEqual(image.path, rbd_path) +class PloopTestCase(_ImageTestCase, test.NoDBTestCase): + SIZE = 1024 + + def setUp(self): + self.image_class = imagebackend.Ploop + super(PloopTestCase, self).setUp() + self.utils = imagebackend.utils + self.stubs.Set(imagebackend.Ploop, 'get_disk_size', lambda a, b: 2048) + + def prepare_mocks(self): + fn = self.mox.CreateMockAnything() + self.mox.StubOutWithMock(imagebackend.utils.synchronized, + '__call__') + self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image') + self.mox.StubOutWithMock(self.utils, 'execute') + return fn + + def test_cache(self): + self.mox.StubOutWithMock(os.path, 'exists') + if self.OLD_STYLE_INSTANCE_PATH: + os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) + os.path.exists(self.TEMPLATE_DIR).AndReturn(False) + os.path.exists(self.PATH).AndReturn(False) + os.path.exists(self.TEMPLATE_PATH).AndReturn(False) + fn = self.mox.CreateMockAnything() + fn(target=self.TEMPLATE_PATH) + self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') + imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR) + self.mox.ReplayAll() + + image = self.image_class(self.INSTANCE, self.NAME) + self.mock_create_image(image) + image.cache(fn, self.TEMPLATE) + + self.mox.VerifyAll() + + def test_create_image(self): + fn = self.prepare_mocks() + fn(target=self.TEMPLATE_PATH, max_size=2048, image_id=None) + img_path = os.path.join(self.PATH, "root.hds") + imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, img_path) + self.utils.execute("ploop", "restore-descriptor", "-f", "raw", + self.PATH, img_path) + self.utils.execute("ploop", "grow", '-s', "2K", + os.path.join(self.PATH, "DiskDescriptor.xml"), + run_as_root=True) + self.mox.ReplayAll() + + image = self.image_class(self.INSTANCE, self.NAME) + image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None) + + self.mox.VerifyAll() + + def test_prealloc_image(self): + self.flags(preallocate_images='space') + fake_processutils.fake_execute_clear_log() + fake_processutils.stub_out_processutils_execute(self.stubs) + image = self.image_class(self.INSTANCE, self.NAME) + + def fake_fetch(target, *args, **kwargs): + return + + self.stubs.Set(os.path, 'exists', lambda _: True) + self.stubs.Set(image, 'check_image_exists', lambda: True) + + image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) + + class BackendTestCase(test.NoDBTestCase): INSTANCE = {'name': 'fake-instance', 'uuid': uuidutils.generate_uuid()} diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py index 492a02ac25..86b9e94c13 100644 --- a/nova/tests/unit/virt/test_hardware.py +++ b/nova/tests/unit/virt/test_hardware.py @@ -22,6 +22,7 @@ from nova import context from nova import exception from nova import objects from nova.objects import base as base_obj +from nova.pci import stats from nova import test from nova.virt import hardware as hw @@ -1421,6 +1422,34 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase): self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology) self.assertEqual(2, fitted_instance2.cells[0].id) + def test_get_fitting_pci_success(self): + pci_request = 
objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': '8086'}]) + pci_reqs = [pci_request] + pci_stats = stats.PciDeviceStats() + with mock.patch.object(stats.PciDeviceStats, + 'support_requests', return_value= True): + fitted_instance1 = hw.numa_fit_instance_to_host(self.host, + self.instance1, + pci_requests=pci_reqs, + pci_stats=pci_stats) + self.assertIsInstance(fitted_instance1, + objects.InstanceNUMATopology) + + def test_get_fitting_pci_fail(self): + pci_request = objects.InstancePCIRequest(count=1, + spec=[{'vendor_id': '8086'}]) + pci_reqs = [pci_request] + pci_stats = stats.PciDeviceStats() + with mock.patch.object(stats.PciDeviceStats, + 'support_requests', return_value= False): + fitted_instance1 = hw.numa_fit_instance_to_host( + self.host, + self.instance1, + pci_requests=pci_reqs, + pci_stats=pci_stats) + self.assertIsNone(fitted_instance1) + class NumberOfSerialPortsTest(test.NoDBTestCase): def test_flavor(self): diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py index e46e41c68c..867cf3e1d8 100644 --- a/nova/tests/unit/virt/vmwareapi/test_vmops.py +++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py @@ -1455,3 +1455,20 @@ class VMwareVMOpsTestCase(test.NoDBTestCase): pbm_default_policy='default-policy', group='vmware') extra_specs = self._vmops._get_extra_specs(flavor) self.assertEqual('flavor-policy', extra_specs.storage_policy) + + def test_get_base_folder_not_set(self): + self.flags(image_cache_subdirectory_name='vmware_base') + base_folder = self._vmops._get_base_folder() + self.assertEqual('vmware_base', base_folder) + + def test_get_base_folder_host_ip(self): + self.flags(my_ip='7.7.7.7', + image_cache_subdirectory_name='_base') + base_folder = self._vmops._get_base_folder() + self.assertEqual('7.7.7.7_base', base_folder) + + def test_get_base_folder_cache_prefix(self): + self.flags(cache_prefix='my_prefix', group='vmware') + self.flags(image_cache_subdirectory_name='_base') + base_folder = self._vmops._get_base_folder() + self.assertEqual('my_prefix_base', base_folder) diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py index 6eb3e137c5..ddc84fbdd0 100644 --- a/nova/virt/hardware.py +++ b/nova/virt/hardware.py @@ -1126,12 +1126,15 @@ class VirtNUMALimitTopology(VirtNUMATopology): def numa_fit_instance_to_host( - host_topology, instance_topology, limits_topology=None): + host_topology, instance_topology, limits_topology=None, + pci_requests=None, pci_stats=None): """Fit the instance topology onto the host topology given the limits :param host_topology: objects.NUMATopology object to fit an instance on :param instance_topology: objects.InstanceNUMATopology to be fitted :param limits_topology: VirtNUMALimitTopology that defines limits + :param pci_requests: instance pci_requests + :param pci_stats: pci_stats for the host Given a host and instance topology and optionally limits - this method will attempt to fit instance cells onto all permutations of host cells @@ -1163,7 +1166,12 @@ def numa_fit_instance_to_host( break cells.append(got_cell) if len(cells) == len(host_cell_perm): - return objects.InstanceNUMATopology(cells=cells) + if not pci_requests: + return objects.InstanceNUMATopology(cells=cells) + elif ((pci_stats is not None) and + pci_stats.support_requests(pci_requests, + cells)): + return objects.InstanceNUMATopology(cells=cells) def _numa_pagesize_usage_from_cell(hostcell, instancecell, sign): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 14dfa946cf..5e430b2269 
100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -1047,14 +1047,25 @@ class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice): self.source_type = "mount" self.source_dir = None + self.source_file = None + self.source_dev = None self.target_dir = "/" + self.driver_type = "loop" + self.driver_format = "raw" def format_dom(self): dev = super(LibvirtConfigGuestFilesys, self).format_dom() dev.set("type", self.source_type) - dev.append(etree.Element("source", dir=self.source_dir)) + if self.source_type == "file": + dev.append(etree.Element("driver", type = self.driver_type, + format = self.driver_format)) + dev.append(etree.Element("source", file=self.source_file)) + elif self.source_type == "block": + dev.append(etree.Element("source", dev=self.source_dev)) + else: + dev.append(etree.Element("source", dir=self.source_dir)) dev.append(etree.Element("target", dir=self.target_dir)) return dev diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index f87d114351..cfae7d3a5e 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -3099,10 +3099,16 @@ class LibvirtDriver(driver.ComputeDriver): inst_type['extra_specs'], self._host.get_version()) + def _get_guest_fs_config(self, instance, name, image_type=None): + image = self.image_backend.image(instance, + name, + image_type) + return image.libvirt_fs_info("/", "ploop") + def _get_guest_storage_config(self, instance, image_meta, disk_info, rescue, block_device_info, - inst_type): + inst_type, os_type): devices = [] disk_mapping = disk_info['mapping'] @@ -3115,6 +3121,9 @@ class LibvirtDriver(driver.ComputeDriver): fs.source_dir = os.path.join( libvirt_utils.get_instance_path(instance), 'rootfs') devices.append(fs) + elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels": + fs = self._get_guest_fs_config(instance, "disk") + devices.append(fs) else: if rescue: @@ -3376,7 +3385,7 @@ class LibvirtDriver(driver.ComputeDriver): guest_cpu_numa.cells.append(guest_cell) return guest_cpu_numa - def _get_guest_numa_config(self, instance_numa_topology, flavor, + def _get_guest_numa_config(self, instance_numa_topology, flavor, pci_devs, allowed_cpus=None): """Returns the config objects for the guest NUMA specs. @@ -3417,24 +3426,27 @@ class LibvirtDriver(driver.ComputeDriver): memory = flavor.memory_mb if topology: # Host is NUMA capable so try to keep the instance in a cell - viable_cells_cpus = [] - for cell in topology.cells: + pci_cells = {pci.numa_node for pci in pci_devs} + if len(pci_cells) == 0: + viable_cells_cpus = [] + for cell in topology.cells: + if vcpus <= len(cell.cpuset) and memory <= cell.memory: + viable_cells_cpus.append(cell.cpuset) + + if viable_cells_cpus: + pin_cpuset = random.choice(viable_cells_cpus) + return GuestNumaConfig(pin_cpuset, None, None, None) + elif len(pci_cells) == 1 and None not in pci_cells: + cell = topology.cells[pci_cells.pop()] if vcpus <= len(cell.cpuset) and memory <= cell.memory: - viable_cells_cpus.append(cell.cpuset) - - if not viable_cells_cpus: - # We can't contain the instance in a cell - do nothing for - # now. 
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index f87d114351..cfae7d3a5e 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -3099,10 +3099,16 @@ class LibvirtDriver(driver.ComputeDriver):
             inst_type['extra_specs'],
             self._host.get_version())
 
+    def _get_guest_fs_config(self, instance, name, image_type=None):
+        image = self.image_backend.image(instance,
+                                         name,
+                                         image_type)
+        return image.libvirt_fs_info("/", "ploop")
+
     def _get_guest_storage_config(self, instance, image_meta,
                                   disk_info,
                                   rescue, block_device_info,
-                                  inst_type):
+                                  inst_type, os_type):
         devices = []
         disk_mapping = disk_info['mapping']
 
@@ -3115,6 +3121,9 @@ class LibvirtDriver(driver.ComputeDriver):
             fs.source_dir = os.path.join(
                 libvirt_utils.get_instance_path(instance), 'rootfs')
             devices.append(fs)
+        elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels":
+            fs = self._get_guest_fs_config(instance, "disk")
+            devices.append(fs)
         else:
 
             if rescue:
@@ -3376,7 +3385,7 @@ class LibvirtDriver(driver.ComputeDriver):
                 guest_cpu_numa.cells.append(guest_cell)
             return guest_cpu_numa
 
-    def _get_guest_numa_config(self, instance_numa_topology, flavor,
+    def _get_guest_numa_config(self, instance_numa_topology, flavor, pci_devs,
                                allowed_cpus=None):
         """Returns the config objects for the guest NUMA specs.
 
@@ -3417,24 +3426,27 @@ class LibvirtDriver(driver.ComputeDriver):
             memory = flavor.memory_mb
             if topology:
                 # Host is NUMA capable so try to keep the instance in a cell
-                viable_cells_cpus = []
-                for cell in topology.cells:
+                pci_cells = {pci.numa_node for pci in pci_devs}
+                if len(pci_cells) == 0:
+                    viable_cells_cpus = []
+                    for cell in topology.cells:
+                        if vcpus <= len(cell.cpuset) and memory <= cell.memory:
+                            viable_cells_cpus.append(cell.cpuset)
+
+                    if viable_cells_cpus:
+                        pin_cpuset = random.choice(viable_cells_cpus)
+                        return GuestNumaConfig(pin_cpuset, None, None, None)
+                elif len(pci_cells) == 1 and None not in pci_cells:
+                    cell = topology.cells[pci_cells.pop()]
                     if vcpus <= len(cell.cpuset) and memory <= cell.memory:
-                        viable_cells_cpus.append(cell.cpuset)
-
-                if not viable_cells_cpus:
-                    # We can't contain the instance in a cell - do nothing for
-                    # now.
-                    # TODO(ndipanov): Attempt to spread the instance across
-                    # NUMA nodes and expose the topology to the instance as an
-                    # optimisation
-                    return GuestNumaConfig(allowed_cpus, None, None, None)
-                else:
-                    pin_cpuset = random.choice(viable_cells_cpus)
-                    return GuestNumaConfig(pin_cpuset, None, None, None)
-            else:
-                # We have no NUMA topology in the host either
-                return GuestNumaConfig(allowed_cpus, None, None, None)
+                        return GuestNumaConfig(cell.cpuset, None, None, None)
+
+            # We have no NUMA topology in the host either,
+            # or we can't find a single cell to accommodate the instance.
+            # TODO(ndipanov): Attempt to spread the instance
+            # across NUMA nodes and expose the topology to the
+            # instance as an optimisation
+            return GuestNumaConfig(allowed_cpus, None, None, None)
         else:
             if topology:
                 # Now get the CpuTune configuration from the numa_topology
@@ -3805,6 +3817,9 @@ class LibvirtDriver(driver.ComputeDriver):
         elif virt_type == "uml":
             guest.os_kernel = "/usr/bin/linux"
             guest.os_root = root_device_name
+        elif virt_type == "parallels":
+            if guest.os_type == vm_mode.EXE:
+                guest.os_init_path = "/sbin/init"
 
     def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
                           instance, inst_path, image_meta, disk_info):
@@ -3857,9 +3872,10 @@ class LibvirtDriver(driver.ComputeDriver):
         guest.memory = flavor.memory_mb * units.Ki
         guest.vcpus = flavor.vcpus
         allowed_cpus = hardware.get_vcpu_pin_set()
+        pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
 
         guest_numa_config = self._get_guest_numa_config(
-            instance.numa_topology, flavor, allowed_cpus)
+            instance.numa_topology, flavor, pci_devs, allowed_cpus)
 
         guest.cpuset = guest_numa_config.cpuset
         guest.cputune = guest_numa_config.cputune
@@ -3905,7 +3921,7 @@ class LibvirtDriver(driver.ComputeDriver):
         storage_configs = self._get_guest_storage_config(
             instance, image_meta, disk_info, rescue, block_device_info,
-            flavor)
+            flavor, guest.os_type)
         for config in storage_configs:
             guest.add_device(config)
@@ -3981,7 +3997,7 @@ class LibvirtDriver(driver.ComputeDriver):
             for pci_dev in pci_manager.get_instance_pci_devs(instance):
                 guest.add_device(self._get_guest_pci_device(pci_dev))
         else:
-            if len(pci_manager.get_instance_pci_devs(instance)) > 0:
+            if len(pci_devs) > 0:
                 raise exception.PciDeviceUnsupportedHypervisor(
                     type=virt_type)
@@ -4302,13 +4318,9 @@ class LibvirtDriver(driver.ComputeDriver):
                     continue
             except Exception:
                 continue
-            ret = doc.findall('./devices/disk')
-            for node in ret:
-                if node.get('type') != 'block':
-                    continue
-                for child in node.getchildren():
-                    if child.tag == 'source':
-                        devices.append(child.get('dev'))
+            sources = doc.findall("./devices/disk[@type='block']/source")
+            for source in sources:
+                devices.append(source.get('dev'))
         return devices
 
     def _get_interfaces(self, xml):
@@ -6171,14 +6183,10 @@ class LibvirtDriver(driver.ComputeDriver):
                 pass
 
         # Update mac addresses of interface if stats have been reported
-        if len(diags.nic_details) > 0:
-            ret = xml_doc.findall('./devices/interface')
-            index = 0
-            for node in ret:
-                for child in node.getchildren():
-                    if child.tag == 'mac':
-                        diags.nic_details[index].mac_address = child.get(
-                            'address')
+        if diags.nic_details:
+            nodes = xml_doc.findall('./devices/interface/mac')
+            for index, node in enumerate(nodes):
+                diags.nic_details[index].mac_address = node.get('address')
 
         return diags
 
     def instance_on_disk(self, instance):
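The two XPath cleanups above replace hand-rolled child walking with ElementTree-style predicates. A standalone sketch of the disk case, using illustrative XML rather than anything from the source:

    from lxml import etree

    xml = """<domain>
      <devices>
        <disk type='block'><source dev='/dev/sdb'/></disk>
        <disk type='file'><source file='/var/lib/nova/disk'/></disk>
      </devices>
    </domain>"""
    doc = etree.fromstring(xml)

    # The [@type='block'] predicate does the filtering the removed
    # loop performed by hand.
    devices = [src.get('dev')
               for src in doc.findall("./devices/disk[@type='block']/source")]
    assert devices == ['/dev/sdb']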
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index b0932a228b..ebdf2db0cc 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -15,7 +15,9 @@
 
 import abc
 import contextlib
+import functools
 import os
+import shutil
 
 from oslo.config import cfg
 from oslo.serialization import jsonutils
@@ -168,6 +170,33 @@ class Image(object):
                 setattr(info, scope[1], value)
         return info
 
+    def libvirt_fs_info(self, target, driver_type=None):
+        """Get `LibvirtConfigGuestFilesys` filled for this image.
+
+        :target: target directory inside a container.
+        :driver_type: filesystem driver type, can be loop,
+                      nbd or ploop.
+        """
+        info = vconfig.LibvirtConfigGuestFilesys()
+        info.target_dir = target
+
+        if self.is_block_dev:
+            info.source_type = "block"
+            info.source_dev = self.path
+        else:
+            info.source_type = "file"
+            info.source_file = self.path
+            info.driver_format = self.driver_format
+            if driver_type:
+                info.driver_type = driver_type
+            else:
+                if self.driver_format == "raw":
+                    info.driver_type = "loop"
+                else:
+                    info.driver_type = "nbd"
+
+        return info
+
     def check_image_exists(self):
         return os.path.exists(self.path)
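A sketch of how libvirt_fs_info() is meant to be consumed: for a file-backed raw image the defaulting above should select the loop driver. The Raw instantiation and path are illustrative and depend on backend internals at this revision:

    from nova.virt.libvirt import imagebackend

    # Raw is file-backed; with a nonexistent path its driver_format
    # stays 'raw', so libvirt_fs_info() falls back to the loop driver.
    image = imagebackend.Raw(path='/var/lib/nova/instances/uuid/disk')
    fs = image.libvirt_fs_info("/")

    assert fs.source_type == "file"
    assert fs.driver_type == "loop"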
+ " You should either set force_raw_images=True" + " in config or upload an image in ploop" + " or raw format.") % format + raise exception.ImageUnacceptable( + image_id=kwargs["image_id"], + reason=reason) + + if not os.path.exists(base): + prepare_template(target=base, max_size=size, *args, **kwargs) + self.verify_base_size(base, size) + + if os.path.exists(self.path): + return + + fileutils.ensure_tree(self.path) + + remove_func = functools.partial(fileutils.delete_if_exists, + remove=shutil.rmtree) + with fileutils.remove_path_on_error(self.path, remove=remove_func): + create_ploop_image(base, self.path, size) + + class Backend(object): def __init__(self, use_cow): self.BACKEND = { @@ -745,6 +832,7 @@ class Backend(object): 'qcow2': Qcow2, 'lvm': Lvm, 'rbd': Rbd, + 'ploop': Ploop, 'default': Qcow2 if use_cow else Raw } diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index c9b03ce8c2..8cb15a1e5b 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -173,7 +173,7 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name, config_spec.memoryMB = int(instance['memory_mb']) # Configure cpu information - if (extra_specs.has_cpu_limits()): + if extra_specs.has_cpu_limits(): allocation = client_factory.create('ns0:ResourceAllocationInfo') if extra_specs.cpu_limits.cpu_limit: allocation.limit = extra_specs.cpu_limits.cpu_limit diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 8da17b387d..d4081ced0e 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -57,8 +57,19 @@ from nova.virt.vmwareapi import vif as vmwarevif from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util +vmops_opts = [ + cfg.StrOpt('cache_prefix', + help='The prefix for Where cached images are stored. This is ' + 'NOT the full path - just a folder prefix. ' + 'This should only be used when a datastore cache should ' + 'be shared between compute nodes. 
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index c9b03ce8c2..8cb15a1e5b 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -173,7 +173,7 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name,
     config_spec.memoryMB = int(instance['memory_mb'])
 
     # Configure cpu information
-    if (extra_specs.has_cpu_limits()):
+    if extra_specs.has_cpu_limits():
         allocation = client_factory.create('ns0:ResourceAllocationInfo')
         if extra_specs.cpu_limits.cpu_limit:
             allocation.limit = extra_specs.cpu_limits.cpu_limit
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 8da17b387d..d4081ced0e 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -57,8 +57,19 @@ from nova.virt.vmwareapi import vif as vmwarevif
 from nova.virt.vmwareapi import vim_util
 from nova.virt.vmwareapi import vm_util
 
+vmops_opts = [
+    cfg.StrOpt('cache_prefix',
+               help='The prefix for where cached images are stored. This is '
+                    'NOT the full path - just a folder prefix. '
+                    'This should only be used when a datastore cache should '
+                    'be shared between compute nodes. Note: this should only '
+                    'be used when the compute nodes have a shared file '
+                    'system.'),
+    ]
 
 CONF = cfg.CONF
+CONF.register_opts(vmops_opts, 'vmware')
+
 CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
 CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
 CONF.import_opt('vnc_enabled', 'nova.vnc')
@@ -151,13 +162,7 @@ class VMwareVMOps(object):
         self._root_resource_pool = vm_util.get_res_pool_ref(self._session,
                                                             self._cluster)
         self._datastore_regex = datastore_regex
-        # Ensure that the base folder is unique per compute node
-        if CONF.remove_unused_base_images:
-            self._base_folder = '%s%s' % (CONF.my_ip,
-                                          CONF.image_cache_subdirectory_name)
-        else:
-            # Aging disable ensures backward compatibility
-            self._base_folder = CONF.image_cache_subdirectory_name
+        self._base_folder = self._get_base_folder()
         self._tmp_folder = 'vmware_temp'
         self._rescue_suffix = '-rescue'
         self._migrate_suffix = '-orig'
@@ -166,6 +171,20 @@ class VMwareVMOps(object):
         self._imagecache = imagecache.ImageCacheManager(self._session,
                                                         self._base_folder)
 
+    def _get_base_folder(self):
+        # Enable more than one compute node to run on the same host
+        if CONF.vmware.cache_prefix:
+            base_folder = '%s%s' % (CONF.vmware.cache_prefix,
+                                    CONF.image_cache_subdirectory_name)
+        # Ensure that the base folder is unique per compute node
+        elif CONF.remove_unused_base_images:
+            base_folder = '%s%s' % (CONF.my_ip,
+                                    CONF.image_cache_subdirectory_name)
+        else:
+            # Aging disable ensures backward compatibility
+            base_folder = CONF.image_cache_subdirectory_name
+        return base_folder
+
     def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
         service_content = self._session.vim.service_content
         LOG.debug("Extending root virtual disk to %s", requested_size)
diff --git a/test-requirements.txt b/test-requirements.txt
index 49c56cc348..8b044955aa 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -10,6 +10,7 @@ mock>=1.0
 mox3>=0.7.0
 MySQL-python
 psycopg2
+python-barbicanclient>=3.0.1
 python-ironicclient>=0.2.1
 python-subunit>=0.0.18
 requests-mock>=0.5.1  # Apache-2.0
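Taken together, the vmops changes give three sources for the image cache folder name, with cache_prefix taking precedence over the per-node IP prefix, which itself applies only while image aging is enabled. A standalone rendering of that precedence (hypothetical helper mirroring _get_base_folder() and the three tests above):

    def base_folder(cache_prefix, remove_unused_base_images, my_ip,
                    subdir_name='_base'):
        if cache_prefix:                   # [vmware]cache_prefix wins
            return cache_prefix + subdir_name
        if remove_unused_base_images:      # then the per-node IP prefix
            return my_ip + subdir_name
        return subdir_name                 # legacy: bare cache subdirectory

    assert base_folder('my_prefix', False, '7.7.7.7') == 'my_prefix_base'
    assert base_folder(None, True, '7.7.7.7') == '7.7.7.7_base'
    assert base_folder(None, False, '7.7.7.7') == '_base'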