Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/block_device.py  89
-rw-r--r--  nova/virt/driver.py  62
-rw-r--r--  nova/virt/fake.py  105
-rw-r--r--  nova/virt/hardware.py  304
-rw-r--r--  nova/virt/hyperv/driver.py  9
-rw-r--r--  nova/virt/hyperv/serialproxy.py  4
-rw-r--r--  nova/virt/images.py  31
-rw-r--r--  nova/virt/interfaces.template  6
-rw-r--r--  nova/virt/ironic/driver.py  73
-rw-r--r--  nova/virt/libvirt/blockinfo.py  85
-rw-r--r--  nova/virt/libvirt/config.py  320
-rw-r--r--  nova/virt/libvirt/cpu/__init__.py (renamed from nova/virt/powervm/disk/__init__.py)  0
-rw-r--r--  nova/virt/libvirt/cpu/api.py  157
-rw-r--r--  nova/virt/libvirt/cpu/core.py  78
-rw-r--r--  nova/virt/libvirt/driver.py  1175
-rw-r--r--  nova/virt/libvirt/event.py  7
-rw-r--r--  nova/virt/libvirt/guest.py  28
-rw-r--r--  nova/virt/libvirt/host.py  272
-rw-r--r--  nova/virt/libvirt/imagebackend.py  111
-rw-r--r--  nova/virt/libvirt/migration.py  13
-rw-r--r--  nova/virt/libvirt/utils.py  163
-rw-r--r--  nova/virt/libvirt/vif.py  2
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py  3
-rw-r--r--  nova/virt/libvirt/volume/lightos.py  63
-rw-r--r--  nova/virt/libvirt/volume/nvme.py  1
-rw-r--r--  nova/virt/netutils.py  9
-rw-r--r--  nova/virt/node.py  108
-rw-r--r--  nova/virt/powervm/__init__.py  17
-rw-r--r--  nova/virt/powervm/disk/driver.py  268
-rw-r--r--  nova/virt/powervm/disk/localdisk.py  211
-rw-r--r--  nova/virt/powervm/disk/ssp.py  258
-rw-r--r--  nova/virt/powervm/driver.py  708
-rw-r--r--  nova/virt/powervm/host.py  66
-rw-r--r--  nova/virt/powervm/image.py  62
-rw-r--r--  nova/virt/powervm/media.py  237
-rw-r--r--  nova/virt/powervm/mgmt.py  175
-rw-r--r--  nova/virt/powervm/tasks/__init__.py  0
-rw-r--r--  nova/virt/powervm/tasks/base.py  38
-rw-r--r--  nova/virt/powervm/tasks/image.py  81
-rw-r--r--  nova/virt/powervm/tasks/network.py  259
-rw-r--r--  nova/virt/powervm/tasks/storage.py  429
-rw-r--r--  nova/virt/powervm/tasks/vm.py  154
-rw-r--r--  nova/virt/powervm/vif.py  373
-rw-r--r--  nova/virt/powervm/vm.py  543
-rw-r--r--  nova/virt/powervm/volume/__init__.py  28
-rw-r--r--  nova/virt/powervm/volume/fcvscsi.py  468
-rw-r--r--  nova/virt/vmwareapi/constants.py  3
-rw-r--r--  nova/virt/vmwareapi/driver.py  64
-rw-r--r--  nova/virt/vmwareapi/session.py  157
-rw-r--r--  nova/virt/vmwareapi/vm_util.py  104
-rw-r--r--  nova/virt/vmwareapi/vmops.py  10
-rw-r--r--  nova/virt/vmwareapi/volumeops.py  96
-rw-r--r--  nova/virt/zvm/driver.py  1
-rw-r--r--  nova/virt/zvm/hypervisor.py  2
54 files changed, 3090 insertions, 5000 deletions
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 4a41703174..28a866a817 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -227,9 +227,70 @@ class DriverSwapBlockDevice(DriverBlockDevice):
})
+class DriverImageBlockDevice(DriverBlockDevice):
+ _valid_source = 'image'
+ _proxy_as_attr_inherited = set(['image_id'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'boot_index',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'
+ ])
+ _fields = set([
+ 'device_name',
+ 'size']) | _new_only_fields
+ _legacy_fields = (
+ _fields - _new_only_fields | set(['num', 'virtual_name']))
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
+
+ def _transform(self):
+ if (not self._bdm_obj.get('source_type') == 'image' or
+ not self._bdm_obj.get('destination_type') == 'local'):
+ raise _InvalidType
+ self.update({
+ 'device_name': self._bdm_obj.device_name,
+ 'size': self._bdm_obj.volume_size or 0,
+ 'disk_bus': self._bdm_obj.disk_bus,
+ 'device_type': self._bdm_obj.device_type,
+ 'guest_format': self._bdm_obj.guest_format,
+ 'image_id': self._bdm_obj.image_id,
+ 'boot_index': 0,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
+ })
+
+
class DriverEphemeralBlockDevice(DriverBlockDevice):
- _new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'])
_fields = set(['device_name', 'size']) | _new_only_fields
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
@@ -239,7 +300,11 @@ class DriverEphemeralBlockDevice(DriverBlockDevice):
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
- 'guest_format': self._bdm_obj.guest_format
+ 'guest_format': self._bdm_obj.guest_format,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
})
@@ -802,15 +867,15 @@ def _convert_block_devices(device_type, block_device_mapping):
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
+convert_local_images = functools.partial(_convert_block_devices,
+ DriverImageBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
-
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
-
convert_snapshots = functools.partial(_convert_block_devices,
DriverVolSnapshotBlockDevice)
@@ -897,9 +962,15 @@ def get_swap(transformed_list):
return None
-_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
- DriverVolumeBlockDevice, DriverVolSnapshotBlockDevice,
- DriverVolImageBlockDevice, DriverVolBlankBlockDevice)
+_IMPLEMENTED_CLASSES = (
+ DriverSwapBlockDevice,
+ DriverEphemeralBlockDevice,
+ DriverVolumeBlockDevice,
+ DriverVolSnapshotBlockDevice,
+ DriverVolImageBlockDevice,
+ DriverVolBlankBlockDevice,
+ DriverImageBlockDevice
+)
def is_implemented(bdm):
@@ -912,6 +983,10 @@ def is_implemented(bdm):
return False
+def is_local_image(bdm):
+ return bdm.source_type == 'image' and bdm.destination_type == 'local'
+
+
def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') and
bdm.destination_type == 'volume' and
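
For illustration, a minimal sketch of the classification rule that the new
DriverImageBlockDevice and is_local_image() are built around, with plain
dicts standing in for BlockDeviceMapping objects (an assumption for brevity):

    # Sketch only: real BDMs are nova.objects.BlockDeviceMapping instances.
    def is_local_image(bdm):
        return (bdm['source_type'] == 'image' and
                bdm['destination_type'] == 'local')

    bdms = [
        {'source_type': 'image', 'destination_type': 'local'},   # image-backed root
        {'source_type': 'image', 'destination_type': 'volume'},  # boot-from-volume
        {'source_type': 'blank', 'destination_type': 'local'},   # ephemeral disk
    ]
    assert [is_local_image(b) for b in bdms] == [True, False, False]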
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index b20e0c6bf7..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -20,7 +20,9 @@ Driver base-classes:
types that support that contract
"""
+import itertools
import sys
+import typing as ty
import os_resource_classes as orc
import os_traits
@@ -32,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -44,6 +47,7 @@ def get_block_device_info(instance, block_device_mapping):
of a dict containing the following keys:
- root_device_name: device name of the root disk
+ - image: An instance of DriverImageBlockDevice or None
- ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice
instances
- swap: An instance of DriverSwapBlockDevice or None
@@ -52,18 +56,18 @@ def get_block_device_info(instance, block_device_mapping):
specialized subclasses.
"""
from nova.virt import block_device as virt_block_device
-
- block_device_info = {
+ return {
'root_device_name': instance.root_device_name,
+ 'image': virt_block_device.convert_local_images(
+ block_device_mapping),
'ephemerals': virt_block_device.convert_ephemerals(
block_device_mapping),
'block_device_mapping':
- virt_block_device.convert_all_volumes(*block_device_mapping)
+ virt_block_device.convert_all_volumes(*block_device_mapping),
+ 'swap':
+ virt_block_device.get_swap(
+ virt_block_device.convert_swap(block_device_mapping))
}
- swap_list = virt_block_device.convert_swap(block_device_mapping)
- block_device_info['swap'] = virt_block_device.get_swap(swap_list)
-
- return block_device_info
def block_device_info_get_root_device(block_device_info):
@@ -81,6 +85,14 @@ def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
+def block_device_info_get_image(block_device_info):
+ block_device_info = block_device_info or {}
+ # get_disk_mapping() supports block_device_info=None and thus requires that
+ # we return a list here.
+ image = block_device_info.get('image') or []
+ return image
+
+
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
@@ -93,6 +105,19 @@ def block_device_info_get_mapping(block_device_info):
return block_device_mapping
+def block_device_info_get_encrypted_disks(
+ block_device_info: ty.Mapping[str, ty.Any],
+) -> ty.List['nova.virt.block_device.DriverBlockDevice']:
+ block_device_info = block_device_info or {}
+ return [
+ driver_bdm for driver_bdm in itertools.chain(
+ block_device_info.get('image', []),
+ block_device_info.get('ephemerals', []),
+ )
+ if driver_bdm.get('encrypted')
+ ]
+
+
# NOTE(aspiers): When adding new capabilities, ensure they are
# mirrored in ComputeDriver.capabilities, and that the corresponding
# values should always be standard traits in os_traits. If something
@@ -126,11 +151,17 @@ CAPABILITY_TRAITS_MAP = {
"supports_secure_boot": os_traits.COMPUTE_SECURITY_UEFI_SECURE_BOOT,
"supports_socket_pci_numa_affinity":
os_traits.COMPUTE_SOCKET_PCI_NUMA_AFFINITY,
+ "supports_remote_managed_ports": os_traits.COMPUTE_REMOTE_MANAGED_PORTS,
+ "supports_ephemeral_encryption": os_traits.COMPUTE_EPHEMERAL_ENCRYPTION,
+ "supports_ephemeral_encryption_luks":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS,
+ "supports_ephemeral_encryption_plain":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_PLAIN,
}
def _check_image_type_exclude_list(capability, supported):
- """Enforce the exclusion list on image_type capabilites.
+ """Enforce the exclusion list on image_type capabilities.
:param capability: The supports_image_type_foo capability being checked
:param supported: The flag indicating whether the virt driver *can*
@@ -194,6 +225,12 @@ class ComputeDriver(object):
"supports_vtpm": False,
"supports_secure_boot": False,
"supports_socket_pci_numa_affinity": False,
+ "supports_remote_managed_ports": False,
+
+ # Ephemeral encryption support flags
+ "supports_ephemeral_encryption": False,
+ "supports_ephemeral_encryption_luks": False,
+ "supports_ephemeral_encryption_plain": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -297,7 +334,8 @@ class ComputeDriver(object):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -335,6 +373,7 @@ class ComputeDriver(object):
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param accel_uuids: Accelerator UUIDs.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
raise NotImplementedError()
@@ -1557,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
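
As a rough sketch of what block_device_info_get_encrypted_disks() selects,
assuming plain dicts in place of DriverBlockDevice objects:

    import itertools

    block_device_info = {
        'image': [{'device_name': '/dev/vda', 'encrypted': True}],
        'ephemerals': [
            {'device_name': '/dev/vdb', 'encrypted': False},
            {'device_name': '/dev/vdc', 'encrypted': True},
        ],
    }
    # Only image and ephemeral disks are considered, and only those
    # flagged as encrypted are returned.
    encrypted = [
        bdm for bdm in itertools.chain(
            block_device_info.get('image', []),
            block_device_info.get('ephemerals', []),
        )
        if bdm.get('encrypted')
    ]
    assert [b['device_name'] for b in encrypted] == ['/dev/vda', '/dev/vdc']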
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 41524b69d2..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -116,6 +118,7 @@ class FakeDriver(driver.ComputeDriver):
"supports_trusted_certs": True,
"supports_pcpus": False,
"supports_accelerators": True,
+ "supports_remote_managed_ports": True,
# Supported image types
"supports_image_type_raw": True,
@@ -159,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessarily complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -503,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for to
+ # prevent the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -645,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -763,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -890,6 +903,36 @@ class FakeLiveMigrateDriverWithNestedCustomResources(
class FakeDriverWithPciResources(SmallFakeDriver):
+ """NOTE: this driver provides symmetric compute nodes. Each compute will
+ have the same resources with the same addresses. It is dangerous, as using
+ this driver can hide issues where, in an asymmetric environment, nova fails
+ to update entities according to the host specific addresses (e.g. the
+ pci_slot of the neutron port bindings).
+
+ The current non virt driver specific functional test environment has many
+ shortcomings that make it really hard to simulate host specific virt drivers.
+
+ 1) The virt driver is instantiated by the service logic from the name of
+ the driver class. This makes passing input to the driver instance from the
+ test at init time pretty impossible. This could be solved with some
+ fixtures around nova.virt.driver.load_compute_driver()
+
+ 2) The compute service accesses the hypervisor not only via the virt
+ interface but also by reading the sysfs of the host. So simply providing a
+ fake virt driver instance is not enough to isolate simulated compute
+ services that are running on the same host. Also, these low level sysfs
+ reads do not carry host specific information in the call params, so simply
+ mocking the low level call does not give a way to provide host specific
+ return values.
+
+ 3) CONF is global, and it is read dynamically by the driver. So providing
+ host specific CONF to driver instances without race conditions between the
+ drivers is extremely hard, especially if periodic tasks are enabled.
+
+ The libvirt based functional test env under nova.tests.functional.libvirt
+ has better support to create asymmetric environments. So please consider
+ using that if possible instead.
+ """
PCI_ADDR_PF1 = '0000:01:00.0'
PCI_ADDR_PF1_VF1 = '0000:01:00.1'
@@ -905,7 +948,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
def setUp(self):
super(FakeDriverWithPciResources.
FakeDriverWithPciResourcesConfigFixture, self).setUp()
- # Set passthrough_whitelist before the compute node starts to match
+ # Set device_spec before the compute node starts to match
# with the PCI devices reported by this fake driver.
# NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a
@@ -920,7 +963,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
# Having two PFs on the same physnet will allow us to test the
# placement allocation - physical allocation matching based on the
# bandwidth allocation in the future.
- CONF.set_override('passthrough_whitelist', override=[
+ CONF.set_override('device_spec', override=[
jsonutils.dumps(
{
"address": {
@@ -954,6 +997,19 @@ class FakeDriverWithPciResources(SmallFakeDriver):
],
group='pci')
+ # These mocks should be removed after bug
+ # https://bugs.launchpad.net/nova/+bug/1961587 has been fixed and
+ # all SRIOV device related information is transferred through the
+ # virt driver and the PciDevice object instead of being queried with
+ # sysfs calls by the network.neutron.API code.
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_mac_by_pci_address',
+ return_value='52:54:00:1e:59:c6'))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address',
+ return_value=1))
+
def get_available_resource(self, nodename):
host_status = super(
FakeDriverWithPciResources, self).get_available_resource(nodename)
@@ -1055,3 +1111,42 @@ class FakeDriverWithCaching(FakeDriver):
else:
self.cached_images.add(image_id)
return True
+
+
+class EphEncryptionDriver(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True)
+
+
+class EphEncryptionDriverLUKS(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_luks=True)
+
+
+class EphEncryptionDriverPLAIN(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ stuff to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
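
A small sketch of the node-identity behaviour these fake drivers rely on:
uuid.uuid5() is deterministic for a given namespace and name, which is what
keeps PredictableNodeUUIDDriver's node UUIDs stable across restarts (the
str() conversion above matches how the uuid is consumed downstream):

    import uuid

    a = str(uuid.uuid5(uuid.NAMESPACE_DNS, 'compute1'))
    b = str(uuid.uuid5(uuid.NAMESPACE_DNS, 'compute1'))
    assert a == b                                          # stable per name
    assert a != str(uuid.uuid5(uuid.NAMESPACE_DNS, 'compute2'))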
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 994be56418..9693e405d3 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
continue
break
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
@@ -1213,10 +1213,13 @@ def _check_for_mem_encryption_requirement_conflicts(
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
+ # image_meta.name is not set if the image object represents a root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
- 'image_name': image_meta.name,
+ 'image_name': image_name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
@@ -1228,10 +1231,15 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
emsg = _(
"Memory encryption requested by %(requesters)s but image "
- "%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
+ "%(image_name)s doesn't have 'hw_firmware_type' property set to "
+ "'uefi' or volume-backed instance was requested"
)
+ # image_meta.name is not set if the image object represents a root Cinder
+ # volume; in this case FlavorImageConflict should still be raised, but
+ # image_meta.name can't be extracted.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {'requesters': " and ".join(requesters),
- 'image_name': image_meta.name}
+ 'image_name': image_name}
raise exception.FlavorImageConflict(emsg % data)
@@ -1260,12 +1268,14 @@ def _check_mem_encryption_machine_type(image_meta, machine_type=None):
if mach_type is None:
return
+ # image_meta.name is not set if the image object represents a root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
- image_id=image_meta.id, image_name=image_meta.name,
+ image_id=image_meta.id, image_name=image_name,
reason=_("q35 type is required for SEV to work"))
@@ -1337,6 +1347,48 @@ def _get_constraint_mappings_from_flavor(flavor, key, func):
return hw_numa_map or None
+def get_locked_memory_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested locked memory.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: exception.LockMemoryForbidden if mem_page_size is not set
+ while a locked_memory value is provided in the image or flavor.
+ :returns: The locked memory flag requested.
+ """
+ mem_page_size_flavor, mem_page_size_image = _get_flavor_image_meta(
+ 'mem_page_size', flavor, image_meta)
+
+ locked_memory_flavor, locked_memory_image = _get_flavor_image_meta(
+ 'locked_memory', flavor, image_meta)
+
+ if locked_memory_flavor is not None:
+ # locked_memory_image is boolean type already
+ locked_memory_flavor = strutils.bool_from_string(locked_memory_flavor)
+
+ if locked_memory_image is not None and (
+ locked_memory_flavor != locked_memory_image
+ ):
+ # We don't allow providing different values in the flavor and image
+ raise exception.FlavorImageLockedMemoryConflict(
+ image=locked_memory_image, flavor=locked_memory_flavor)
+
+ locked_memory = locked_memory_flavor
+
+ else:
+ locked_memory = locked_memory_image
+
+ if locked_memory and not (
+ mem_page_size_flavor or mem_page_size_image
+ ):
+ raise exception.LockMemoryForbidden()
+
+ return locked_memory
+
+
def _get_numa_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -1784,6 +1836,57 @@ def get_pci_numa_policy_constraint(
return policy
+def get_pmu_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested vPMU configuration.
+
+ This one's a little different since we don't return False in the default
+ case: the PMU should only be configured if explicit configuration is
+ provided, otherwise we leave it to the hypervisor.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: nova.exception.FlavorImageConflict if a value is specified in both
+ the flavor and the image, but the values do not match
+ :raises: nova.exception.Invalid if a value or combination of values is
+ invalid
+ :returns: True if the virtual Performance Monitoring Unit must be enabled,
+ False if it should be disabled, or None if unconfigured.
+ """
+ flavor_value_str, image_value = _get_flavor_image_meta(
+ 'pmu', flavor, image_meta)
+
+ flavor_value = None
+ if flavor_value_str is not None:
+ flavor_value = strutils.bool_from_string(flavor_value_str)
+
+ if (
+ image_value is not None and
+ flavor_value is not None and
+ image_value != flavor_value
+ ):
+ msg = _(
+ "Flavor %(flavor_name)s has %(prefix)s:%(key)s extra spec "
+ "explicitly set to %(flavor_val)s, conflicting with image "
+ "%(image_name)s which has %(prefix)s_%(key)s explicitly set to "
+ "%(image_val)s."
+ )
+ raise exception.FlavorImageConflict(
+ msg % {
+ 'prefix': 'hw',
+ 'key': 'pmu',
+ 'flavor_name': flavor.name,
+ 'flavor_val': flavor_value,
+ 'image_name': image_meta.name,
+ 'image_val': image_value,
+ },
+ )
+
+ return flavor_value if flavor_value is not None else image_value
+
+
def get_vif_multiqueue_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -2056,6 +2159,8 @@ def numa_get_constraints(flavor, image_meta):
pagesize = _get_numa_pagesize_constraint(flavor, image_meta)
vpmems = get_vpmems(flavor)
+ get_locked_memory_constraint(flavor, image_meta)
+
# If 'hw:cpu_dedicated_mask' is not found in flavor extra specs, the
# 'dedicated_cpus' variable is None, while we hope it being an empty set.
dedicated_cpus = dedicated_cpus or set()
@@ -2200,6 +2305,7 @@ def _numa_cells_support_network_metadata(
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
limits: ty.Optional['objects.NUMATopologyLimit'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
@@ -2215,6 +2321,12 @@ def numa_fit_instance_to_host(
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resource
+ for that RequestGroup. If it is None then it signals that the
+ InstancePCIRequest objects already store a mapping per request.
+ I.e. we are called _after_ the scheduler has made allocations for this
+ request in placement.
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
@@ -2244,21 +2356,99 @@ def numa_fit_instance_to_host(
host_cells = host_topology.cells
- # If PCI device(s) are not required, prefer host cells that don't have
- # devices attached. Presence of a given numa_node in a PCI pool is
- # indicative of a PCI device being associated with that node
- if not pci_requests and pci_stats:
- # TODO(stephenfin): pci_stats can't be None here but mypy can't figure
- # that out for some reason
- host_cells = sorted(host_cells, key=lambda cell: cell.id in [
- pool['numa_node'] for pool in pci_stats.pools]) # type: ignore
+ # We need to perform all optimizations only if the number of the
+ # instance's cells is less than the number of the host's cells. If they
+ # are equal, we'll use all cells and no sorting of the cells list is
+ # needed.
+ if len(host_topology) > len(instance_topology):
+ pack = CONF.compute.packing_host_numa_cells_allocation_strategy
+ # To balance NUMA cell usage based on several parameters, some sorts
+ # are performed on the host_cells list to move less used cells to the
+ # beginning of the host_cells list (when the pack variable is set to
+ # 'False'). When pack is set to 'True', the most used cells will be put
+ # at the beginning of the host_cells list.
+
+ # The first sort is based on memory usage. cell.avail_memory returns
+ # the free memory for the cell. Reverse the sorting to get cells with
+ # more free memory first when pack is 'False'.
+ host_cells = sorted(
+ host_cells,
+ reverse=not pack,
+ key=lambda cell: cell.avail_memory)
+
+ # Next, sort based on available dedicated or shared CPUs.
+ # cpu_policy is set to the same value in all cells, so we use the
+ # first cell in the list (it exists if instance_topology is defined)
+ # to get the cpu_policy.
+ if instance_topology.cells[0].cpu_policy in (
+ None, fields.CPUAllocationPolicy.SHARED):
+ # sort based on used CPUs
+ host_cells = sorted(
+ host_cells,
+ reverse=pack,
+ key=lambda cell: cell.cpu_usage)
+ else:
+ # sort based on presence of pinned CPUs
+ host_cells = sorted(
+ host_cells,
+ reverse=not pack,
+ key=lambda cell: len(cell.free_pcpus))
+
+ # Perform sort only if pci_stats exists
+ if pci_stats:
+ # Create dict with numa cell id as key
+ # and total number of free pci devices as value.
+ total_pci_in_cell: ty.Dict[int, int] = {}
+ for pool in pci_stats.pools:
+ if pool['numa_node'] in list(total_pci_in_cell):
+ total_pci_in_cell[pool['numa_node']] += pool['count']
+ else:
+ total_pci_in_cell[pool['numa_node']] = pool['count']
+ # For backward compatibility we will always 'spread': we always move
+ # host cells with PCI to the beginning of the list if PCI is requested
+ # by the VM, and move host cells with PCI to the end of the list if
+ # PCI isn't requested by the VM.
+ if pci_requests:
+ host_cells = sorted(
+ host_cells,
+ reverse=True,
+ key=lambda cell: total_pci_in_cell.get(cell.id, 0))
+ else:
+ host_cells = sorted(
+ host_cells,
+ key=lambda cell: total_pci_in_cell.get(cell.id, 0))
+
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does not fit
+ not_fit_cache = set()
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does fit
+ fit_cache = set()
for host_cell_perm in itertools.permutations(
host_cells, len(instance_topology)):
chosen_instance_cells: ty.List['objects.InstanceNUMACell'] = []
chosen_host_cells: ty.List['objects.NUMACell'] = []
for host_cell, instance_cell in zip(
host_cell_perm, instance_topology.cells):
+
+ cell_pair = (host_cell.id, instance_cell.id)
+
+ # if we already checked this pair and they did not fit, then there is
+ # no need to check again; just move to the next permutation
+ if cell_pair in not_fit_cache:
+ break
+
+ # if we already checked this pair and they fit before, they will fit
+ # now too, so there is no need to check again. Just continue with the
+ # next cell pair in the permutation
+ if cell_pair in fit_cache:
+ chosen_host_cells.append(host_cell)
+ # Normally this would have been done by _numa_fit_instance_cell,
+ # but we optimized that out here based on the cache
+ instance_cell.id = host_cell.id
+ chosen_instance_cells.append(instance_cell)
+ continue
+
try:
cpuset_reserved = 0
if (instance_topology.emulator_threads_isolated and
@@ -2275,17 +2465,24 @@ def numa_fit_instance_to_host(
# This exception will be raised if instance cell's
# custom pagesize is not supported with host cell in
# _numa_cell_supports_pagesize_request function.
+
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
if got_cell is None:
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
chosen_host_cells.append(host_cell)
chosen_instance_cells.append(got_cell)
+ # cache the result
+ fit_cache.add(cell_pair)
if len(chosen_instance_cells) != len(host_cell_perm):
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
- pci_requests, chosen_instance_cells):
+ pci_requests, provider_mapping, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
@@ -2386,6 +2583,7 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
+ socket=host_cell.socket,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
@@ -2410,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
None, fields.CPUAllocationPolicy.SHARED,
):
continue
-
- pinned_cpus = set(instance_cell.cpu_pinning.values())
+ if instance_cell.cpu_pinning:
+ pinned_cpus = set(instance_cell.cpu_pinning.values())
+ else:
+ pinned_cpus = set()
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
@@ -2462,3 +2662,73 @@ def check_hw_rescue_props(image_meta):
"""
hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus']
return any(key in image_meta.properties for key in hw_rescue_props)
+
+
+def get_ephemeral_encryption_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> bool:
+ """Get the ephemeral encryption constrants based on the flavor and image.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict
+ :returns: boolean indicating whether encryption of guest ephemeral storage
+ was requested
+ """
+ flavor_eph_encryption_str, image_eph_encryption = _get_flavor_image_meta(
+ 'ephemeral_encryption', flavor, image_meta)
+
+ flavor_eph_encryption = None
+ if flavor_eph_encryption_str is not None:
+ flavor_eph_encryption = strutils.bool_from_string(
+ flavor_eph_encryption_str)
+
+ # Check for conflicts between explicit requirements regarding
+ # ephemeral encryption.
+ # TODO(layrwood): make _check_for_mem_encryption_requirement_conflicts
+ # generic and reuse here
+ if (
+ flavor_eph_encryption is not None and
+ image_eph_encryption is not None and
+ flavor_eph_encryption != image_eph_encryption
+ ):
+ emsg = _(
+ "Flavor %(flavor_name)s has hw:ephemeral_encryption extra spec "
+ "explicitly set to %(flavor_val)s, conflicting with "
+ "image %(image_name)s which has hw_eph_encryption property "
+ "explicitly set to %(image_val)s"
+ )
+ data = {
+ 'flavor_name': flavor.name,
+ 'flavor_val': flavor_eph_encryption_str,
+ 'image_name': image_meta.name,
+ 'image_val': image_eph_encryption,
+ }
+ raise exception.FlavorImageConflict(emsg % data)
+
+ return flavor_eph_encryption or image_eph_encryption
+
+
+def get_ephemeral_encryption_format(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[str]:
+ """Get the ephemeral encryption format.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict or nova.exception.Invalid
+ :returns: BlockDeviceEncryptionFormatType or None
+ """
+ eph_format = _get_unique_flavor_image_meta(
+ 'ephemeral_encryption_format', flavor, image_meta)
+ if eph_format:
+ if eph_format not in fields.BlockDeviceEncryptionFormatType.ALL:
+ allowed = fields.BlockDeviceEncryptionFormatType.ALL
+ raise exception.Invalid(
+ f"Invalid ephemeral encryption format {eph_format}. "
+ f"Allowed values: {', '.join(allowed)}"
+ )
+ return eph_format
+ return None
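
A minimal sketch of the flavor/image merge rule implemented by
get_ephemeral_encryption_constraint() above; plain values stand in for the
Flavor and ImageMeta objects:

    from oslo_utils import strutils

    def merge(flavor_value_str, image_value):
        flavor_value = None
        if flavor_value_str is not None:
            # flavor extra specs are strings, image properties are booleans
            flavor_value = strutils.bool_from_string(flavor_value_str)
        if (flavor_value is not None and image_value is not None and
                flavor_value != image_value):
            raise ValueError('flavor/image conflict')  # FlavorImageConflict
        return flavor_value or image_value

    assert merge('true', None) is True
    assert merge(None, True) is True
    assert merge('false', None) is None  # falsy, mirrors `flavor or image`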
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 350e59e295..ba18c85cf7 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -103,6 +103,7 @@ class HyperVDriver(driver.ComputeDriver):
"supports_pcpus": False,
"supports_accelerators": False,
"supports_secure_boot": True,
+ "supports_remote_managed_ports": False,
# Supported image types
"supports_image_type_vhd": True,
@@ -145,6 +146,14 @@ class HyperVDriver(driver.ComputeDriver):
'in Rocky.')
def init_host(self, host):
+ LOG.warning(
+ 'The hyperv driver is not tested by the OpenStack project nor '
+ 'does it have clear maintainer(s) and thus its quality can not be '
+ 'ensured. It should be considered experimental and may be removed '
+ 'in a future release. If you are using the driver in production '
+ 'please let us know via the openstack-discuss mailing list.'
+ )
+
self._serialconsoleops.start_console_handlers()
event_handler = eventhandler.InstanceEventHandler(
state_change_callback=self.emit_event)
diff --git a/nova/virt/hyperv/serialproxy.py b/nova/virt/hyperv/serialproxy.py
index 4f8a99dcf6..d12fb8bf6e 100644
--- a/nova/virt/hyperv/serialproxy.py
+++ b/nova/virt/hyperv/serialproxy.py
@@ -46,7 +46,7 @@ class SerialProxy(threading.Thread):
def __init__(self, instance_name, addr, port, input_queue,
output_queue, client_connected):
super(SerialProxy, self).__init__()
- self.setDaemon(True)
+ self.daemon = True
self._instance_name = instance_name
self._addr = addr
@@ -99,7 +99,7 @@ class SerialProxy(threading.Thread):
workers = []
for job in [self._get_data, self._send_data]:
worker = threading.Thread(target=job)
- worker.setDaemon(True)
+ worker.daemon = True
worker.start()
workers.append(worker)
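
The setDaemon() calls removed above are deprecated since Python 3.10 in
favour of assigning the daemon attribute before start(); a tiny sketch:

    import threading

    worker = threading.Thread(target=lambda: None)
    worker.daemon = True   # equivalent to the removed worker.setDaemon(True)
    worker.start()
    worker.join()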
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not len(types):
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index ee78a1fc60..453ac43a8f 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -9,6 +9,7 @@ iface lo inet loopback
{% for ifc in interfaces %}
auto {{ ifc.name }}
+{% if ifc.address %}
iface {{ ifc.name }} inet static
hwaddress ether {{ ifc.hwaddress }}
address {{ ifc.address }}
@@ -20,11 +21,11 @@ iface {{ ifc.name }} inet static
{% if ifc.dns %}
dns-nameservers {{ ifc.dns }}
{% endif %}
+{% endif %}
{% if use_ipv6 %}
-{% if libvirt_virt_type == 'lxc' %}
{% if ifc.address_v6 %}
+{% if libvirt_virt_type == 'lxc' %}
post-up ip -6 addr add {{ ifc.address_v6 }}/{{ifc.netmask_v6 }} dev ${IFACE}
-{% endif %}
{% if ifc.gateway_v6 %}
post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE}
{% endif %}
@@ -41,4 +42,5 @@ iface {{ ifc.name }} inet6 static
{% endif %}
{% endif %}
{% endif %}
+{% endif %}
{% endfor %}
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 2a4fd39fda..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -20,13 +20,13 @@ bare metal resources.
"""
import base64
-from distutils import version
import gzip
import shutil
import tempfile
import time
from urllib import parse as urlparse
+import microversion_parse
from openstack import exceptions as sdk_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -164,6 +164,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -396,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have got here
+ # before that happened, but after the previous allocation
+ # got deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
@@ -741,7 +754,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# baremetal nodes. Depending on the version of Ironic,
# this can be as long as 2-10 seconds per every thousand
# nodes, and this call may retrieve all nodes in a deployment,
- # depending on if any filter paramters are applied.
+ # depending on if any filter parameters are applied.
return self._get_node_list(fields=_NODE_FIELDS, **kwargs)
# NOTE(jroll) if partition_key is set, we need to limit nodes that
@@ -826,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
@@ -873,15 +893,25 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# nodename is the ironic node's UUID.
node = self._node_from_cache(nodename)
+
reserved = False
- if (not self._node_resources_used(node) and
- self._node_resources_unavailable(node)):
- LOG.debug('Node %(node)s is not ready for a deployment, '
- 'reporting resources as reserved for it. Node\'s '
- 'provision state is %(prov)s, power state is '
- '%(power)s and maintenance is %(maint)s.',
- {'node': node.uuid, 'prov': node.provision_state,
- 'power': node.power_state, 'maint': node.maintenance})
+ if self._node_resources_unavailable(node):
+ # Operators might mark a node as in maintenance,
+ # even when an instance is on the node;
+ # either way, let's mark this as reserved
+ reserved = True
+
+ if (self._node_resources_used(node) and
+ not CONF.workarounds.skip_reserve_in_use_ironic_nodes):
+ # Mark resources as reserved once we have
+ # an instance here.
+ # When the allocation is deleted, most likely
+ # automatic cleaning will start, so we keep the node
+ # reserved until it becomes available again.
+ # In the case without automatic cleaning, once
+ # the allocation is removed in placement it
+ # also stays reserved until we notice on
+ # the next periodic task that it's actually available.
reserved = True
info = self._node_resource(node)
@@ -1629,7 +1659,8 @@ class IronicDriver(virt_driver.ComputeDriver):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
@@ -1670,7 +1701,13 @@ class IronicDriver(virt_driver.ComputeDriver):
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
:param accel_uuids: Accelerator UUIDs. Ignored by this driver.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
+ if reimage_boot_volume:
+ raise exception.NovaException(
+ _("Ironic doesn't support rebuilding volume backed "
+ "instances."))
+
LOG.debug('Rebuild called for instance', instance=instance)
instance.task_state = task_states.REBUILD_SPAWNING
@@ -2056,7 +2093,7 @@ class IronicDriver(virt_driver.ComputeDriver):
return None
def _can_send_version(self, min_version=None, max_version=None):
- """Validate if the suppplied version is available in the API."""
+ """Validate if the supplied version is available in the API."""
# NOTE(TheJulia): This will effectively just be a pass if no
# version negotiation has occurred, since there is no way for
# us to know without explicitly otherwise requesting that
@@ -2066,13 +2103,17 @@ class IronicDriver(virt_driver.ComputeDriver):
if self.ironicclient.is_api_version_negotiated:
current_api_version = self.ironicclient.current_api_version
if (min_version and
- version.StrictVersion(current_api_version) <
- version.StrictVersion(min_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) <
+ microversion_parse.parse_version_string(
+ min_version)):
raise exception.IronicAPIVersionNotAvailable(
version=min_version)
if (max_version and
- version.StrictVersion(current_api_version) >
- version.StrictVersion(max_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) >
+ microversion_parse.parse_version_string(
+ max_version)):
raise exception.IronicAPIVersionNotAvailable(
version=max_version)
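
The distutils version module is deprecated (removed in Python 3.12), hence
the switch to microversion_parse, which compares versions numerically; a
quick sketch:

    import microversion_parse

    v_old = microversion_parse.parse_version_string('1.9')
    v_new = microversion_parse.parse_version_string('1.38')
    assert v_old < v_new          # numeric comparison: 9 < 38
    assert '1.9' > '1.38'         # naive string comparison gets this wrong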
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index f86a9c461c..4efc6fbaeb 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -73,6 +73,7 @@ import itertools
import operator
from oslo_config import cfg
+from oslo_serialization import jsonutils
from nova import block_device
@@ -400,6 +401,16 @@ def get_info_from_bdm(instance, virt_type, image_meta, bdm,
# NOTE(ndipanov): libvirt starts ordering from 1, not 0
bdm_info['boot_index'] = str(boot_index + 1)
+ # If the device is encrypted pass through the secret, format and options
+ if bdm.get('encrypted'):
+ bdm_info['encrypted'] = bdm.get('encrypted')
+ bdm_info['encryption_secret_uuid'] = bdm.get('encryption_secret_uuid')
+ bdm_info['encryption_format'] = bdm.get('encryption_format')
+ encryption_options = bdm.get('encryption_options')
+ if encryption_options:
+ bdm_info['encryption_options'] = jsonutils.loads(
+ encryption_options)
+
return bdm_info
@@ -414,13 +425,7 @@ def get_device_name(bdm):
def get_root_info(instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name=None):
- # NOTE (ndipanov): This is a hack to avoid considering an image
- # BDM with local target, as we don't support them
- # yet. Only applies when passed non-driver format
- no_root_bdm = (not root_bdm or (
- root_bdm.get('source_type') == 'image' and
- root_bdm.get('destination_type') == 'local'))
- if no_root_bdm:
+ if root_bdm is None:
# NOTE(mriedem): In case the image_meta object was constructed from
# an empty dict, like in the case of evacuate, we have to first check
# if disk_format is set on the ImageMeta object.
@@ -452,10 +457,13 @@ def default_device_names(virt_type, context, instance, block_device_info,
image_meta):
get_disk_info(virt_type, instance, image_meta, block_device_info)
- for driver_bdm in itertools.chain(block_device_info['ephemerals'],
- [block_device_info['swap']] if
- block_device_info['swap'] else [],
- block_device_info['block_device_mapping']):
+ for driver_bdm in itertools.chain(
+ block_device_info['image'],
+ block_device_info['ephemerals'],
+ [block_device_info['swap']] if
+ block_device_info['swap'] else [],
+ block_device_info['block_device_mapping']
+ ):
driver_bdm.save()
@@ -563,41 +571,48 @@ def _get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
:returns: Disk mapping for the given instance.
"""
mapping = {}
- pre_assigned_device_names = \
- [block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
+
+ driver_bdms = itertools.chain(
+ driver.block_device_info_get_image(block_device_info),
driver.block_device_info_get_ephemerals(block_device_info),
[driver.block_device_info_get_swap(block_device_info)],
- driver.block_device_info_get_mapping(block_device_info))
- if get_device_name(bdm)]
-
- # NOTE (ndipanov): root_bdm can be None when we boot from image
- # as there is no driver representation of local targeted images
- # and they will not be in block_device_info list.
- root_bdm = block_device.get_root_bdm(
- driver.block_device_info_get_mapping(block_device_info))
+ driver.block_device_info_get_mapping(block_device_info)
+ )
+ pre_assigned_device_names = [
+ block_device.strip_dev(get_device_name(bdm))
+ for bdm in driver_bdms if get_device_name(bdm)
+ ]
+
+ # Try to find the root driver bdm, either an image based disk or volume
+ root_bdm = None
+ if any(driver.block_device_info_get_image(block_device_info)):
+ root_bdm = driver.block_device_info_get_image(block_device_info)[0]
+ elif driver.block_device_info_get_mapping(block_device_info):
+ root_bdm = block_device.get_root_bdm(
+ driver.block_device_info_get_mapping(block_device_info))
root_device_name = block_device.strip_dev(
driver.block_device_info_get_root_device(block_device_info))
root_info = get_root_info(
instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name)
-
mapping['root'] = root_info
- # NOTE (ndipanov): This implicitly relies on image->local BDMs not
- # being considered in the driver layer - so missing
- # bdm with boot_index 0 means - use image, unless it was
- # overridden. This can happen when using legacy syntax and
- # no root_device_name is set on the instance.
- if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
- block_device_info):
- mapping['disk'] = root_info
- elif root_bdm:
- # NOTE (ft): If device name is not set in root bdm, root_info has a
- # generated one. We have to copy device name to root bdm to prevent its
- # second generation in loop through bdms. If device name is already
- # set, nothing is changed.
+
+ # NOTE (ft): If device name is not set in root bdm, root_info has a
+ # generated one. We have to copy device name to root bdm to prevent its
+ # second generation in loop through bdms. If device name is already
+ # set, nothing is changed.
+ # NOTE(melwitt): root_bdm can be None in the case of an ISO root device,
+ # for example.
+ if root_bdm:
update_bdm(root_bdm, root_info)
+ if (
+ driver.block_device_info_get_image(block_device_info) or
+ root_bdm is None
+ ):
+ mapping['disk'] = root_info
+
default_eph = get_default_ephemeral_info(instance, disk_bus,
block_device_info, mapping)
if default_eph:
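
A condensed sketch of the new root-disk selection order in
_get_disk_mapping(): an image->local driver BDM wins, otherwise the root
volume from block_device_mapping is used. Plain dicts stand in for driver
BDMs, and the boot_index == 0 test approximates block_device.get_root_bdm():

    def pick_root_bdm(block_device_info):
        image_bdms = block_device_info.get('image') or []
        if any(image_bdms):
            return image_bdms[0]
        volumes = block_device_info.get('block_device_mapping') or []
        return next(
            (bdm for bdm in volumes if bdm.get('boot_index') == 0), None)

    assert pick_root_bdm({'image': [{'image_id': 'i1'}]}) == {'image_id': 'i1'}
    assert pick_root_bdm({'block_device_mapping': [{'boot_index': 0}]}) == \
        {'boot_index': 0}
    assert pick_root_bdm({}) is None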
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 7129933f34..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -24,6 +24,7 @@ helpers for populating up config object instances.
"""
import time
+import typing as ty
from collections import OrderedDict
from lxml import etree
@@ -32,6 +33,7 @@ from oslo_utils import units
from nova import exception
from nova.i18n import _
+from nova.objects import fields
from nova.pci import utils as pci_utils
from nova.virt import hardware
@@ -45,9 +47,12 @@ class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
- self.root_name = kwargs.get("root_name")
- self.ns_prefix = kwargs.get('ns_prefix')
- self.ns_uri = kwargs.get('ns_uri')
+ self.root_name = kwargs.pop("root_name")
+ self.ns_prefix = kwargs.pop("ns_prefix", None)
+ self.ns_uri = kwargs.pop("ns_uri", None)
+
+ # handle programmer error
+ assert not kwargs
def _new_node(self, node_name, **kwargs):
if self.ns_uri is None:
@@ -63,9 +68,6 @@ class LibvirtConfigObject(object):
child.text = str(value)
return child
- def get_yes_no_str(self, value):
- return 'yes' if value else 'no'
-
def format_dom(self):
return self._new_node(self.root_name)
@@ -84,6 +86,25 @@ class LibvirtConfigObject(object):
pretty_print=pretty_print)
return xml_str
+ @classmethod
+ def parse_on_off_str(self, value: ty.Optional[str]) -> bool:
+ if value is not None and value not in ('on', 'off'):
+ msg = _(
+ "Element should contain either 'on' or 'off'; "
+ "found: '%(value)s'"
+ )
+ raise exception.InvalidInput(msg % {'value': value})
+
+ return value == 'on'
+
+ @classmethod
+ def get_yes_no_str(self, value: bool) -> str:
+ return 'yes' if value else 'no'
+
+ @classmethod
+ def get_on_off_str(self, value: bool) -> str:
+ return 'on' if value else 'off'
+
def __repr__(self):
return self.to_xml(pretty_print=False)
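
The new classmethod helpers normalize libvirt's on/off attributes; a short
sketch of the round trip, importing the class from the module under change:

    from nova.virt.libvirt.config import LibvirtConfigObject

    assert LibvirtConfigObject.get_on_off_str(True) == 'on'
    assert LibvirtConfigObject.get_on_off_str(False) == 'off'
    assert LibvirtConfigObject.parse_on_off_str('on') is True
    assert LibvirtConfigObject.parse_on_off_str(None) is False
    # any other string raises exception.InvalidInput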
@@ -1532,7 +1553,8 @@ class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(
+ root_name='diskencryptionsecret', **kwargs)
self.type = None
self.uuid = None
@@ -1552,7 +1574,8 @@ class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
"""
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskEncryption, self).__init__(
+ root_name='diskencryption', **kwargs)
self.format = None
self.secret = None
@@ -1575,7 +1598,8 @@ class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskMirror, self).__init__(
+ root_name='diskmirror', **kwargs)
self.ready = None
def parse_dom(self, xmldoc):
@@ -1585,6 +1609,8 @@ class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
+ if 'root_name' not in kwargs:
+ kwargs['root_name'] = 'id'
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
@@ -1912,6 +1938,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
+ elif self.net_type == 'vdpa':
+ self.source_dev = c.get('dev')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
@@ -2019,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2029,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
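
For reference, a sketch of the SPICE sub-elements the new attributes render
into (attribute values here are illustrative, not defaults):

    from lxml import etree

    dev = etree.Element('graphics', type='spice')
    dev.append(etree.Element('image', compression='auto_glz'))
    dev.append(etree.Element('playback', compression='on'))  # get_on_off_str(True)
    dev.append(etree.Element('streaming', mode='filter'))
    print(etree.tostring(dev, pretty_print=True).decode())
    # <graphics type="spice">
    #   <image compression="auto_glz"/>
    #   <playback compression="on"/>
    #   <streaming mode="filter"/>
    # </graphics>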
@@ -2168,13 +2220,14 @@ class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController):
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestHostdev, self).\
- __init__(root_name="hostdev", **kwargs)
- self.mode = kwargs.get('mode')
- self.type = kwargs.get('type')
+ super(LibvirtConfigGuestHostdev, self).__init__(
+ root_name="hostdev", **kwargs,
+ )
+ self.mode = None
+ self.type = None
# managed attribute is only used by PCI devices but mediated devices
# need to say managed=no
- self.managed = kwargs.get('managed', 'yes')
+ self.managed = "yes"
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
@@ -2194,8 +2247,11 @@ class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
- __init__(mode='subsystem', type='pci',
- **kwargs)
+ __init__(**kwargs)
+
+ self.mode = 'subsystem'
+ self.type = 'pci'
+
# These are returned from libvirt as hexadecimal strings with 0x prefix
# even if they have a different meaningful range: domain 16 bit,
# bus 8 bit, slot 5 bit, and function 3 bit
@@ -2252,10 +2308,14 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestHostdevMDEV, self).__init__(
- mode='subsystem', type='mdev', managed='no', **kwargs)
+ super(LibvirtConfigGuestHostdevMDEV, self).__init__(**kwargs)
+
+ self.mode = 'subsystem'
+ self.type = 'mdev'
+ self.managed = 'no'
+
# model attribute is only supported by mediated devices
- self.model = kwargs.get('model', 'vfio-pci')
+ self.model = 'vfio-pci'
self.uuid = None
def format_dom(self):
@@ -2688,6 +2748,19 @@ class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature):
return root
+class LibvirtConfigGuestFeatureSMM(LibvirtConfigGuestFeature):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestFeatureSMM, self).__init__("smm", **kwargs)
+
+ def format_dom(self):
+ root = super(LibvirtConfigGuestFeatureSMM, self).format_dom()
+
+ root.append(etree.Element("smm", state="on"))
+
+ return root
+
+
class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
def __init__(self, state, **kwargs):
@@ -2704,6 +2777,18 @@ class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
return root
+class LibvirtConfigGuestFeatureIOAPIC(LibvirtConfigGuestFeature):
+
+ def __init__(self, **kwargs):
+ super().__init__("ioapic", **kwargs)
+ self.driver = "qemu"
+
+ def format_dom(self):
+ root = super().format_dom()
+ root.set('driver', self.driver)
+ return root
+
+
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
@@ -2719,6 +2804,15 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
+ self.vpindex = False
+ self.runtime = False
+ self.synic = False
+ self.reset = False
+ self.frequencies = False
+ self.reenlightenment = False
+ self.tlbflush = False
+ self.ipi = False
+ self.evmcs = False
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
@@ -2735,6 +2829,24 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
+ if self.vpindex:
+ root.append(etree.Element('vpindex', state='on'))
+ if self.runtime:
+ root.append(etree.Element('runtime', state='on'))
+ if self.synic:
+ root.append(etree.Element('synic', state='on'))
+ if self.reset:
+ root.append(etree.Element('reset', state='on'))
+ if self.frequencies:
+ root.append(etree.Element('frequencies', state='on'))
+ if self.reenlightenment:
+ root.append(etree.Element('reenlightenment', state='on'))
+ if self.tlbflush:
+ root.append(etree.Element('tlbflush', state='on'))
+ if self.ipi:
+ root.append(etree.Element('ipi', state='on'))
+ if self.evmcs:
+ root.append(etree.Element('evmcs', state='on'))
return root
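
A similar sketch for the extended Hyper-V enlightenments (again assuming a Nova development environment; only flags set to True emit an element):

from lxml import etree

from nova.virt.libvirt import config as vconfig

hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.vpindex = True
hv.frequencies = True

# Expected (abbreviated) output:
#   <hyperv>
#     <vpindex state="on"/>
#     <frequencies state="on"/>
#   </hyperv>
print(etree.tostring(hv.format_dom(), pretty_print=True).decode())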
@@ -2810,6 +2922,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
+ self.os_arch = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
@@ -2852,6 +2965,8 @@ class LibvirtConfigGuest(LibvirtConfigObject):
os.set("firmware", self.os_firmware)
type_node = self._text_node("type", self.os_type)
+ if self.os_arch is not None:
+ type_node.set("arch", self.os_arch)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
@@ -3029,6 +3144,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
# LibvirtConfigGuestGidMap
# LibvirtConfigGuestCPU
# LibvirtConfigGuestVPMEM
+ # LibvirtConfigGuestIOMMU
for c in xmldoc:
if c.tag == 'devices':
for d in c:
@@ -3056,6 +3172,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
+ elif d.tag == 'iommu':
+ obj = LibvirtConfigGuestIOMMU()
+ obj.parse_dom(d)
+ self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
@@ -3080,7 +3200,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
else:
self._parse_basic_props(c)
- def add_device(self, dev):
+ def add_feature(self, dev: LibvirtConfigGuestFeature) -> None:
+ self.features.append(dev)
+
+ def add_device(self, dev: LibvirtConfigGuestDevice) -> None:
self.devices.append(dev)
def add_perf_event(self, event):
@@ -3130,6 +3253,7 @@ class LibvirtConfigNodeDevice(LibvirtConfigObject):
self.pci_capability = None
self.mdev_information = None
self.vdpa_capability = None
+ self.vpd_capability = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
@@ -3183,6 +3307,7 @@ class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
self.numa_node = None
self.fun_capability = []
self.mdev_capability = []
+ self.vpd_capability = None
self.interface = None
self.address = None
self.link_state = None
@@ -3225,6 +3350,10 @@ class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap()
mdevcap.parse_dom(c)
self.mdev_capability.append(mdevcap)
+ elif c.tag == "capability" and c.get('type') in ('vpd',):
+ vpdcap = LibvirtConfigNodeDeviceVpdCap()
+ vpdcap.parse_dom(c)
+ self.vpd_capability = vpdcap
def pci_address(self):
return "%04x:%02x:%02x.%01x" % (
@@ -3277,6 +3406,7 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
@@ -3286,6 +3416,103 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
+ if c.tag == "uuid":
+ self.uuid = c.text
+
+
+class LibvirtConfigNodeDeviceVpdCap(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super().__init__(
+ root_name="capability", **kwargs)
+ self._card_name = None
+ self._change_level = None
+ self._manufacture_id = None
+ self._part_number = None
+ self._serial_number = None
+ self._asset_tag = None
+ self._ro_vendor_fields = {}
+ self._rw_vendor_fields = {}
+ self._rw_system_fields = {}
+
+ @staticmethod
+ def _process_custom_field(fields_dict, field_element):
+ index = field_element.get('index')
+ if index:
+ fields_dict[index] = field_element.text
+
+ def _parse_ro_fields(self, fields_element):
+ for e in fields_element:
+ if e.tag == 'change_level':
+ self._change_level = e.text
+ elif e.tag == 'manufacture_id':
+ self._manufacture_id = e.text
+ elif e.tag == 'part_number':
+ self._part_number = e.text
+ elif e.tag == 'serial_number':
+ self._serial_number = e.text
+ elif e.tag == 'vendor_field':
+ self._process_custom_field(self._ro_vendor_fields, e)
+
+ def _parse_rw_fields(self, fields_element):
+ for e in fields_element:
+ if e.tag == 'asset_tag':
+ self._asset_tag = e.text
+ elif e.tag == 'vendor_field':
+ self._process_custom_field(self._rw_vendor_fields, e)
+ elif e.tag == 'system_field':
+ self._process_custom_field(self._rw_system_fields, e)
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigNodeDeviceVpdCap, self).parse_dom(xmldoc)
+ for c in xmldoc:
+ if c.tag == "name":
+ self._card_name = c.text
+ if c.tag == "fields":
+ access = c.get('access')
+ if access:
+ if access == 'readonly':
+ self._parse_ro_fields(c)
+ elif access == 'readwrite':
+ self._parse_rw_fields(c)
+ else:
+ continue
+
+ @property
+ def card_name(self):
+ return self._card_name
+
+ @property
+ def change_level(self):
+ return self._change_level
+
+ @property
+ def manufacture_id(self):
+ return self._manufacture_id
+
+ @property
+ def part_number(self):
+ return self._part_number
+
+ @property
+ def card_serial_number(self):
+ return self._serial_number
+
+ @property
+ def asset_tag(self):
+ return self._asset_tag
+
+ @property
+ def ro_vendor_fields(self):
+ return self._ro_vendor_fields
+
+ @property
+ def rw_vendor_fields(self):
+ return self._rw_vendor_fields
+
+ @property
+ def rw_system_fields(self):
+ return self._rw_system_fields
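
A minimal sketch of feeding this parser a libvirt nodedev VPD capability blob (the XML below is a hand-written example, not captured from a real device):

from lxml import etree

from nova.virt.libvirt import config as vconfig

VPD_XML = """<capability type='vpd'>
  <name>Example 25GbE NIC</name>
  <fields access='readonly'>
    <serial_number>SN12345</serial_number>
    <vendor_field index='0'>VendorData</vendor_field>
  </fields>
  <fields access='readwrite'>
    <asset_tag>fake-asset-tag</asset_tag>
  </fields>
</capability>"""

cap = vconfig.LibvirtConfigNodeDeviceVpdCap()
cap.parse_dom(etree.fromstring(VPD_XML))
assert cap.card_name == 'Example 25GbE NIC'
assert cap.card_serial_number == 'SN12345'
assert cap.ro_vendor_fields == {'0': 'VendorData'}
assert cap.asset_tag == 'fake-asset-tag'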
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
@@ -3468,11 +3695,11 @@ class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
self.model = "nvdimm"
self.access = "shared"
- self.source_path = kwargs.get("devpath", "")
- self.align_size = kwargs.get("align_kb", 0)
+ self.source_path = ""
+ self.align_size = 0
self.pmem = True
- self.target_size = kwargs.get("size_kb", 0)
+ self.target_size = 0
self.target_node = 0
self.label_size = 2 * units.Ki
@@ -3518,6 +3745,53 @@ class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
self.target_size = sub.text
+class LibvirtConfigGuestIOMMU(LibvirtConfigGuestDevice):
+ """https://libvirt.org/formatdomain.html#iommu-devices"""
+
+ def __init__(self, **kwargs):
+ super().__init__(root_name="iommu", **kwargs)
+
+ self.model: str = fields.VIOMMUModel.AUTO
+ self.interrupt_remapping: bool = False
+ self.caching_mode: bool = False
+ self.eim: bool = False
+ self.iotlb: bool = False
+
+ def format_dom(self):
+ iommu = super().format_dom()
+ iommu.set("model", self.model)
+
+ driver = etree.Element("driver")
+ driver.set("intremap", self.get_on_off_str(self.interrupt_remapping))
+ driver.set("caching_mode", self.get_on_off_str(self.caching_mode))
+
+ # aw_bits is only set when the libvirt version satisfies
+ # MIN_LIBVIRT_VIOMMU_AW_BITS in the driver; when the attribute is
+ # absent, aw_bits is not supported.
+ if hasattr(self, "aw_bits"):
+ driver.set("aw_bits", str(self.aw_bits))
+ driver.set("eim", self.get_on_off_str(self.eim))
+ driver.set("iotlb", self.get_on_off_str(self.iotlb))
+ iommu.append(driver)
+
+ return iommu
+
+ def parse_dom(self, xmldoc):
+ super().parse_dom(xmldoc)
+ self.model = xmldoc.get("model")
+
+ driver = xmldoc.find("./driver")
+ if driver is not None:
+ self.interrupt_remapping = self.parse_on_off_str(
+ driver.get("intremap"))
+ self.caching_mode = self.parse_on_off_str(
+ driver.get("caching_mode"))
+ if driver.get("aw_bits") is not None:
+ self.aw_bits = int(driver.get("aw_bits"))
+ self.iotlb = self.parse_on_off_str(driver.get("iotlb"))
+ self.eim = self.parse_on_off_str(driver.get("eim"))
+
+
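
And a sketch of the vIOMMU element the class above renders (assuming a Nova development environment; aw_bits is only set by the driver when the libvirt version supports it):

from lxml import etree

from nova.virt.libvirt import config as vconfig

iommu = vconfig.LibvirtConfigGuestIOMMU()
iommu.model = "intel"
iommu.interrupt_remapping = True
iommu.caching_mode = True
iommu.eim = True
iommu.iotlb = True

# Expected output:
#   <iommu model="intel">
#     <driver intremap="on" caching_mode="on" eim="on" iotlb="on"/>
#   </iommu>
print(etree.tostring(iommu.format_dom(), pretty_print=True).decode())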
class LibvirtConfigGuestMetaNovaPorts(LibvirtConfigObject):
def __init__(self, ports=None):
diff --git a/nova/virt/powervm/disk/__init__.py b/nova/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/virt/powervm/disk/__init__.py
+++ b/nova/virt/libvirt/cpu/__init__.py
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on whether the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if not CONF.compute.cpu_dedicated_set:
+ msg = _("'[compute]/cpu_dedicated_set' must be set if "
+ "'[libvirt]/cpu_power_management' is set. "
+ "Please provide the CPUs that can be pinned, or disable "
+ "the power management if you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ # we need to collect the governors strategy and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+ msg = _("All the cores need to have the same governor strategy"
+ "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
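
As a usage sketch, the Core wrapper above reduces to simple attribute access (this assumes a Linux compute host with writable /sys/devices/system/cpu, [libvirt]cpu_power_management enabled, and a privileged privsep context for the setters):

from nova.virt.libvirt.cpu import api as cpu_api

core0 = cpu_api.Core(0)
print(core0.online)       # reads /sys/devices/system/cpu/cpu0/online
print(core0.governor)     # reads .../cpu0/cpufreq/scaling_governor

core0.online = False      # offlines the core ('cpu_state' strategy)
core0.set_low_governor()  # e.g. 'powersave' ('governor' strategy)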
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+ raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
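
A short sketch of the sysfs helpers above; get_available_cores() parses the kernel range syntax via hardware.parse_cpu_spec, e.g. '0-3,8' becomes {0, 1, 2, 3, 8} (assumes a Linux host exposing /sys/devices/system/cpu/present):

from nova.virt.libvirt.cpu import core

for ident in sorted(core.get_available_cores()):
    # get_governor() requires cpufreq support on the host.
    print(ident, core.get_online(ident), core.get_governor(ident))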
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 2ea493d452..fe48960296 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -96,7 +96,6 @@ from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
@@ -115,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt.cpu import api as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -188,6 +188,7 @@ VOLUME_DRIVERS = {
'vzstorage': 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver', # noqa:E501
'storpool': 'nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver', # noqa:E501
'nvmeof': 'nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
+ 'lightos': 'nova.virt.libvirt.volume.lightos.LibvirtLightOSVolumeDriver',
}
@@ -220,6 +221,12 @@ MIN_QEMU_VERSION = (4, 2, 0)
NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
NEXT_MIN_QEMU_VERSION = (5, 2, 0)
+# vIOMMU driver attribute aw_bits minimal support version.
+MIN_LIBVIRT_VIOMMU_AW_BITS = (6, 5, 0)
+
+# vIOMMU model value `virtio` minimal support version
+MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL = (8, 3, 0)
+
MIN_LIBVIRT_AARCH64_CPU_COMPARE = (6, 9, 0)
# Virtuozzo driver support
@@ -243,6 +250,16 @@ LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
MIN_LIBVIRT_VDPA = (6, 9, 0)
MIN_QEMU_VDPA = (5, 1, 0)
+REGISTER_IMAGE_PROPERTY_DEFAULTS = [
+ 'hw_machine_type',
+ 'hw_cdrom_bus',
+ 'hw_disk_bus',
+ 'hw_input_bus',
+ 'hw_pointer_model',
+ 'hw_video_model',
+ 'hw_vif_model',
+]
+
class AsyncDeviceEventsHandler:
"""A synchornization point between libvirt events an clients waiting for
@@ -396,6 +413,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -423,6 +442,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -447,7 +470,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -796,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+ # NOTE(sbauza): We first verify whether the dedicated CPU governors or
+ # states were previously modified by Nova. Note that this can raise an
+ # exception if either the governor strategies differ between the cores
+ # or if the cores are offline.
+ libvirt_cpu.validate_all_dedicated_cpus()
+ # NOTE(sbauza): We power down all dedicated CPUs, but if some instances
+ # exist that are pinned to some CPUs, we'll power those CPUs back up
+ # later when rebooting the instance in _init_instance().
+ # Note that this can raise an exception if the config options are
+ # misconfigured.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -804,7 +838,9 @@ class LibvirtDriver(driver.ComputeDriver):
self._check_vtpm_support()
- self._register_instance_machine_type()
+ # Set REGISTER_IMAGE_PROPERTY_DEFAULTS in the instance system_metadata
+ # to default values for properties that have not already been set.
+ self._register_all_undefined_instance_details()
def _update_host_specific_capabilities(self) -> None:
"""Update driver capabilities based on capabilities of the host."""
@@ -812,36 +848,118 @@ class LibvirtDriver(driver.ComputeDriver):
# or UEFI bootloader support in this manner
self.capabilities.update({
'supports_secure_boot': self._host.supports_secure_boot,
+ 'supports_remote_managed_ports':
+ self._host.supports_remote_managed_ports
})
- def _register_instance_machine_type(self):
- """Register the machine type of instances on this host
+ def _register_all_undefined_instance_details(self) -> None:
+ """Register the default image properties of instances on this host
For each instance found on this host by InstanceList.get_by_host ensure
- a machine type is registered within the system metadata of the instance
+ REGISTER_IMAGE_PROPERTY_DEFAULTS are registered within the system
+ metadata of the instance
"""
context = nova_context.get_admin_context()
hostname = self._host.get_hostname()
+ for instance in objects.InstanceList.get_by_host(
+ context, hostname, expected_attrs=['flavor', 'system_metadata']
+ ):
+ try:
+ self._register_undefined_instance_details(context, instance)
+ except Exception:
+ LOG.exception('Ignoring unknown failure while attempting '
+ 'to save the defaults for unregistered image '
+ 'properties', instance=instance)
- for instance in objects.InstanceList.get_by_host(context, hostname):
- # NOTE(lyarwood): Skip if hw_machine_type is set already in the
- # image_meta of the instance. Note that this value comes from the
- # system metadata of the instance where it is stored under the
- # image_hw_machine_type key.
- if instance.image_meta.properties.get('hw_machine_type'):
- continue
+ def _register_undefined_instance_details(
+ self,
+ context: nova_context.RequestContext,
+ instance: 'objects.Instance',
+ ) -> None:
+ # Find any unregistered image properties against this instance
+ unregistered_image_props = [
+ p for p in REGISTER_IMAGE_PROPERTY_DEFAULTS
+ if f"image_{p}" not in instance.system_metadata
+ ]
- # Fetch and record the machine type from the config
- hw_machine_type = libvirt_utils.get_machine_type(
- instance.image_meta)
- # NOTE(lyarwood): As above this updates
- # image_meta.properties.hw_machine_type within the instance and
- # will be returned the next time libvirt_utils.get_machine_type is
- # called for the instance image meta.
- instance.system_metadata['image_hw_machine_type'] = hw_machine_type
- instance.save()
- LOG.debug("Instance machine_type updated to %s", hw_machine_type,
- instance=instance)
+ # Return if there's nothing left to register for this instance
+ if not unregistered_image_props:
+ return
+
+ LOG.debug(f'Attempting to register defaults for the following '
+ f'image properties: {unregistered_image_props}',
+ instance=instance)
+
+ # NOTE(lyarwood): Only build disk_info once per instance if we need it
+ # for hw_{disk,cdrom}_bus to avoid pulling bdms from the db etc.
+ requires_disk_info = ['hw_disk_bus', 'hw_cdrom_bus']
+ disk_info = None
+ if set(requires_disk_info) & set(unregistered_image_props):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ context, instance.uuid)
+ block_device_info = driver.get_block_device_info(instance, bdms)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, instance.image_meta,
+ block_device_info)
+
+ # Only pull the guest config once per instance if we need it for
+ # hw_pointer_model or hw_input_bus.
+ requires_guest_config = ['hw_pointer_model', 'hw_input_bus']
+ guest_config = None
+ if set(requires_guest_config) & set(unregistered_image_props):
+ guest_config = self._host.get_guest(instance).get_config()
+
+ for image_prop in unregistered_image_props:
+ try:
+ default_value = self._find_default_for_image_property(
+ instance, image_prop, disk_info, guest_config)
+ instance.system_metadata[f"image_{image_prop}"] = default_value
+
+ LOG.debug(f'Found default for {image_prop} of {default_value}',
+ instance=instance)
+ except Exception:
+ LOG.exception(f'Ignoring unknown failure while attempting '
+ f'to find the default of {image_prop}',
+ instance=instance)
+ instance.save()
+
+ def _find_default_for_image_property(
+ self,
+ instance: 'objects.Instance',
+ image_property: str,
+ disk_info: ty.Optional[ty.Dict[str, ty.Any]],
+ guest_config: ty.Optional[vconfig.LibvirtConfigGuest],
+ ) -> ty.Optional[str]:
+ if image_property == 'hw_machine_type':
+ return libvirt_utils.get_machine_type(instance.image_meta)
+
+ if image_property == 'hw_disk_bus' and disk_info:
+ return disk_info.get('disk_bus')
+
+ if image_property == 'hw_cdrom_bus' and disk_info:
+ return disk_info.get('cdrom_bus')
+
+ if image_property == 'hw_input_bus' and guest_config:
+ _, default_input_bus = self._get_pointer_bus_and_model(
+ guest_config, instance.image_meta)
+ return default_input_bus
+
+ if image_property == 'hw_pointer_model' and guest_config:
+ default_pointer_model, _ = self._get_pointer_bus_and_model(
+ guest_config, instance.image_meta)
+ # hw_pointer_model is of type PointerModelType ('usbtablet' instead
+ # of 'tablet')
+ if default_pointer_model == 'tablet':
+ default_pointer_model = 'usbtablet'
+ return default_pointer_model
+
+ if image_property == 'hw_video_model':
+ return self._get_video_type(instance.image_meta)
+
+ if image_property == 'hw_vif_model':
+ return self.vif_driver.get_vif_model(instance.image_meta)
+
+ return None
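
To make the net effect concrete, a hypothetical example of the entries an existing instance's system_metadata gains after registration (key names follow the image_ prefix convention above; the values are illustrative only):

# Hypothetical post-registration system_metadata entries; values are
# examples, not guaranteed defaults.
instance_system_metadata = {
    'image_hw_machine_type': 'pc-q35-5.2',
    'image_hw_disk_bus': 'virtio',
    'image_hw_cdrom_bus': 'sata',
    'image_hw_input_bus': 'usb',
    'image_hw_pointer_model': 'usbtablet',
    'image_hw_video_model': 'virtio',
    'image_hw_vif_model': 'virtio',
}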
def _prepare_cpu_flag(self, flag):
# NOTE(kchamart) This helper method will be used while computing
@@ -884,33 +1002,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+ "and CPU Flags %(flags)s ar not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -1414,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+ # We're sure the instance is gone, we can shutdown the core if so
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -2472,7 +2585,7 @@ class LibvirtDriver(driver.ComputeDriver):
# ServerRescueNegativeTestJSON.test_rescued_vm_detach_volume
# Log a warning and let the upper layer detect that the device is
# still attached and retry
- LOG.error(
+ LOG.warning(
'Waiting for libvirt event about the detach of '
'device %s with device alias %s from instance %s is timed '
'out.', device_name, dev.alias, instance_uuid)
@@ -2616,7 +2729,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
block_device.resize(new_size)
- def _resize_attached_encrypted_volume(self, original_new_size,
+ def _resize_attached_encrypted_volume(self, context, original_new_size,
block_device, instance,
connection_info, encryption):
# TODO(lyarwood): Also handle the dm-crypt encryption providers of
@@ -2662,6 +2775,17 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.exception('Unknown error when attempting to find the '
'payload_offset for LUKSv1 encrypted disk '
'%s.', path, instance=instance)
+
+ else: # os-brick encryptor driver
+ encryptor = self._get_volume_encryptor(connection_info, encryption)
+ decrypted_device_new_size = encryptor.extend_volume(context,
+ **encryption)
+ if decrypted_device_new_size is None:
+ raise exception.VolumeExtendFailed(
+ volume_id=block_device._disk,
+ reason="Encryptor extend failed."
+ )
+
# NOTE(lyarwood): Resize the decrypted device within the instance to
# the calculated size as with normal volumes.
self._resize_attached_volume(
@@ -2710,7 +2834,7 @@ class LibvirtDriver(driver.ComputeDriver):
context, self._volume_api, volume_id, connection_info)
if encryption:
self._resize_attached_encrypted_volume(
- new_size, dev, instance,
+ context, new_size, dev, instance,
connection_info, encryption)
else:
self._resize_attached_volume(
@@ -3055,11 +3179,16 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
self._attach_pci_devices(
- guest, pci_manager.get_instance_pci_devs(instance))
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
@@ -3135,7 +3264,13 @@ class LibvirtDriver(driver.ComputeDriver):
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': err_msg})
- raise exception.InternalError(msg)
+
+ if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
+ msg += (", libvirt cannot connect to the qemu-guest-agent"
+ " inside the instance.")
+ raise exception.InstanceQuiesceFailed(reason=msg)
+ else:
+ raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
@@ -3173,17 +3308,18 @@ class LibvirtDriver(driver.ComputeDriver):
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
- libvirt_utils.create_cow_image(src_back_path, disk_delta,
- src_disk_size)
+ libvirt_utils.create_image(
+ disk_delta, 'qcow2', src_disk_size, backing_file=src_back_path)
- quiesced = False
try:
- self._set_quiesced(context, instance, image_meta, True)
- quiesced = True
+ self._can_quiesce(instance, image_meta)
except exception.NovaException as err:
- if self._requires_quiesce(image_meta):
+ if image_meta.properties.get('os_require_quiesce', False):
+ LOG.error('Quiescing instance failed but image property '
+ '"os_require_quiesce" is set: %(reason)s.',
+ {'reason': err}, instance=instance)
raise
- LOG.info('Skipping quiescing instance: %(reason)s.',
+ LOG.info('Quiescing instance not available: %(reason)s.',
{'reason': err}, instance=instance)
try:
@@ -3204,12 +3340,24 @@ class LibvirtDriver(driver.ComputeDriver):
while not dev.is_job_complete():
time.sleep(0.5)
+ finally:
+ quiesced = False
+ try:
+ # NOTE: The filesystem freeze is applied after the disk
+ # mirroring completes in order to minimize the freeze
+ # time. The mirror between both disks is established,
+ # syncs continuously, and is stopped after abort_job().
+ self.quiesce(context, instance, image_meta)
+ quiesced = True
+ except exception.NovaException as err:
+ LOG.info('Skipping quiescing instance: %(reason)s.',
+ {'reason': err}, instance=instance)
+
dev.abort_job()
nova.privsep.path.chown(disk_delta, uid=os.getuid())
- finally:
self._host.write_instance_config(xml)
if quiesced:
- self._set_quiesced(context, instance, image_meta, False)
+ self.unquiesce(context, instance, image_meta)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
@@ -3897,7 +4045,12 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
- guest.shutdown()
+ try:
+ guest.shutdown()
+ except libvirt.libvirtError as e:
+ LOG.debug("Ignoring libvirt exception from shutdown request: %s",
+ encodeutils.exception_to_unicode(e),
+ instance=instance)
retry_countdown = retry_interval
for sec in range(timeout):
@@ -3977,8 +4130,12 @@ class LibvirtDriver(driver.ComputeDriver):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
- self._detach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._detach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._detach_direct_passthrough_ports(context, instance, guest)
self._detach_mediated_devices(guest)
guest.save_memory_state()
@@ -3996,8 +4153,12 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._create_guest_with_network(
context, xml, instance, network_info, block_device_info,
vifs_already_plugged=True)
- self._attach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._attach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(
context, instance, guest, network_info)
self._attach_mediated_devices(guest, mdevs)
@@ -4244,6 +4405,11 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.info("Instance spawned successfully.", instance=instance)
+ # Finally register defaults for any undefined image properties so that
+ # future changes by QEMU, libvirt or within this driver don't change
+ # the ABI of the instance.
+ self._register_undefined_instance_details(context, instance)
+
def _get_console_output_file(self, instance, console_log):
bytes_to_read = MAX_CONSOLE_BYTES
log_data = b"" # The last N read bytes
@@ -4377,7 +4543,7 @@ class LibvirtDriver(driver.ComputeDriver):
'%dG' % ephemeral_size,
specified_fs)
return
- libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
+ libvirt_utils.create_image(target, 'raw', f'{ephemeral_size}G')
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
@@ -4386,7 +4552,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
- libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
+ libvirt_utils.create_image(target, 'raw', f'{swap_mb}M')
nova.privsep.fs.unprivileged_mkfs('swap', target)
@staticmethod
@@ -4509,12 +4675,16 @@ class LibvirtDriver(driver.ComputeDriver):
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
- def image(fname, image_type=CONF.libvirt.images_type):
- return self.image_backend.by_name(instance,
- fname + suffix, image_type)
+ def image(
+ fname, image_type=CONF.libvirt.images_type, disk_info_mapping=None
+ ):
+ return self.image_backend.by_name(
+ instance, fname + suffix, image_type,
+ disk_info_mapping=disk_info_mapping)
- def raw(fname):
- return image(fname, image_type='raw')
+ def raw(fname, disk_info_mapping=None):
+ return image(
+ fname, image_type='raw', disk_info_mapping=disk_info_mapping)
created_instance_dir = True
@@ -4528,13 +4698,11 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Creating instance directory", instance=instance)
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
- LOG.info('Creating image', instance=instance)
+ LOG.info('Creating image(s)', instance=instance)
flavor = instance.get_flavor()
swap_mb = 0
if 'disk.swap' in disk_mapping:
- mapping = disk_mapping['disk.swap']
-
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
@@ -4548,12 +4716,17 @@ class LibvirtDriver(driver.ComputeDriver):
# leaving the work with bdm only.
swap_mb = flavor['swap']
else:
+ disk_info_mapping = disk_mapping['disk.swap']
+ disk_device = disk_info_mapping['dev']
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
- elif (flavor['swap'] > 0 and
- not block_device.volume_in_mapping(
- mapping['dev'], block_device_info)):
+ elif (
+ flavor['swap'] > 0 and
+ not block_device.volume_in_mapping(
+ disk_device, block_device_info,
+ )
+ ):
swap_mb = flavor['swap']
if swap_mb > 0:
@@ -4586,8 +4759,8 @@ class LibvirtDriver(driver.ComputeDriver):
image_id=disk_images['ramdisk_id'])
created_disks = self._create_and_inject_local_root(
- context, instance, booted_from_volume, suffix, disk_images,
- injection_info, fallback_from_host)
+ context, instance, disk_mapping, booted_from_volume, suffix,
+ disk_images, injection_info, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type(
@@ -4600,7 +4773,9 @@ class LibvirtDriver(driver.ComputeDriver):
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
- disk_image = image('disk.local')
+ disk_info_mapping = disk_mapping['disk.local']
+ disk_image = image(
+ 'disk.local', disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4619,7 +4794,9 @@ class LibvirtDriver(driver.ComputeDriver):
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
- disk_image = image(blockinfo.get_eph_disk(idx))
+ disk_name = blockinfo.get_eph_disk(idx)
+ disk_info_mapping = disk_mapping[disk_name]
+ disk_image = image(disk_name, disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4658,7 +4835,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (created_instance_dir, created_disks)
- def _create_and_inject_local_root(self, context, instance,
+ def _create_and_inject_local_root(self, context, instance, disk_mapping,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
created_disks = False
@@ -4668,9 +4845,6 @@ class LibvirtDriver(driver.ComputeDriver):
injection_info is not None and
CONF.libvirt.inject_partition != -2)
- # NOTE(ndipanov): Even if disk_mapping was passed in, which
- # currently happens only on rescue - we still don't want to
- # create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images['image_id'])
size = instance.flavor.root_gb * units.Gi
@@ -4678,8 +4852,10 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
- backend = self.image_backend.by_name(instance, 'disk' + suffix,
- CONF.libvirt.images_type)
+ disk_name = 'disk' + suffix
+ disk_info_mapping = disk_mapping[disk_name]
+ backend = self.image_backend.by_name(
+ instance, disk_name, disk_info_mapping=disk_info_mapping)
created_disks = not backend.exists()
if instance.task_state == task_states.RESIZE_FINISH:
@@ -4857,16 +5033,18 @@ class LibvirtDriver(driver.ComputeDriver):
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
- for hdev in [d for d in guest_config.devices
- if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
+ for hdev in [
+ d for d in guest_config.devices
+ if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)
+ ]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev.address)
- if [int(x, 16) for x in hdbsf] ==\
- [int(x, 16) for x in dbsf]:
- raise exception.PciDeviceDetachFailed(reason=
- "timeout",
- dev=dev)
-
+ if (
+ [int(x, 16) for x in hdbsf] ==
+ [int(x, 16) for x in dbsf]
+ ):
+ raise exception.PciDeviceDetachFailed(
+ reason="timeout", dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
@@ -4914,33 +5092,76 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
guest.attach_device(cfg)
+ # TODO(sean-k-mooney): we should try to converge this function with
+ # _detach_direct_passthrough_vifs, which does the same operation
+ # correctly for live migration
def _detach_direct_passthrough_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
- # In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create
- # pci request per direct passthrough port. Therefore we can trust
- # that pci_slot value in the vif is correct.
- direct_passthrough_pci_addresses = [
+
+ attached_via_hostdev_element = []
+ attached_via_interface_element = []
+
+ for vif in network_info:
+ if vif['profile'].get('pci_slot') is None:
+ # this is not an SR-IOV interface, so skip it
+ continue
+
+ if (vif['vnic_type'] not in
+ network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
+ continue
+
+ cfg = self.vif_driver.get_config(
+ instance, vif, instance.image_meta, instance.flavor,
+ CONF.libvirt.virt_type)
+ LOG.debug(f'Detaching type: {type(cfg)}, data: {cfg}')
+ if isinstance(cfg, vconfig.LibvirtConfigGuestHostdevPCI):
+ attached_via_hostdev_element.append(vif)
+ else:
+ attached_via_interface_element.append(vif)
+
+ pci_devs = instance.get_pci_devices()
+ hostdev_pci_addresses = {
vif['profile']['pci_slot']
- for vif in network_info
- if (vif['vnic_type'] in
- network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and
- vif['profile'].get('pci_slot') is not None)
+ for vif in attached_via_hostdev_element
+ }
+ direct_passthrough_pci_addresses = [
+ pci_dev for pci_dev in pci_devs
+ if pci_dev.address in hostdev_pci_addresses
]
- # use detach_pci_devices to avoid failure in case of
- # multiple guest direct passthrough ports with the same MAC
- # (protection use-case, ports are on different physical
- # interfaces)
- pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
- direct_passthrough_pci_addresses = (
- [pci_dev for pci_dev in pci_devs
- if pci_dev.address in direct_passthrough_pci_addresses])
+ # FIXME(sean-k-mooney): I am using _detach_pci_devices because
+ # of the previous comment introduced by change-id:
+ # I3a45b1fb41e8e446d1f25d7a1d77991c8bf2a1ed
+ # in relation to bug 1563874. However, I'm not convinced that
+ # patch was correct, so we should reevaluate if we should do this.
+ # The intent of using _detach_pci_devices is
+ # to somehow cater for the use case where multiple ports have
+ # the same MAC address. However, _detach_pci_devices can only remove
+ # devices that are attached as hostdev elements, not via the
+ # interface element.
+ # So using it for all devices would break vnic-type direct when
+ # using the sriov_nic_agent ml2 driver, or VIFs of vnic_type vdpa.
+ # Since PF ports can't have the same MAC, that means this
+ # use case was for hardware-offloaded OVS? Many NICs do not allow
+ # two VFs to have the same MAC on different VLANs due to the
+ # ordering of the VLAN and MAC filters in their static packet
+ # processing pipeline, so it's unclear if this will work in any
+ # non-OVS offload case. We should look into this more closely,
+ # as from my testing in this patch we appear to use the interface
+ # element for hardware-offloaded OVS too. Infiniband and vnic_type
+ # direct-physical ports do need this code path; neither of those can
+ # have duplicate MACs...
self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
+ # for ports that are attached with interface elements we cannot use
+ # _detach_pci_devices so we use detach_interface
+ for vif in attached_via_interface_element:
+ self.detach_interface(context, instance, vif)
+
def _update_compute_provider_status(self, context, service):
"""Calls the ComputeVirtAPI.update_compute_provider_status method
@@ -5018,6 +5239,43 @@ class LibvirtDriver(driver.ComputeDriver):
else:
mount.get_manager().host_down()
+ def _check_emulation_arch(self, image_meta):
+ # NOTE(chateaulav) In order to support emulation via qemu,
+ # there are required metadata properties that need to be applied
+ # to the designated glance image. The config drive is not
+ # supported. This leverages the hw_architecture and
+ # hw_emulation_architecture image_meta fields to allow for
+ # emulation to take advantage of all physical multiarch work
+ # being done.
+ #
+ # aarch64 emulation support metadata values:
+ # 'hw_emulation_architecture=aarch64'
+ # 'hw_firmware_type=uefi'
+ # 'hw_machine_type=virt'
+ #
+ # ppc64le emulation support metadata values:
+ # 'hw_emulation_architecture=ppc64le'
+ # 'hw_machine_type=pseries'
+ #
+ # s390x emulation support metadata values:
+ # 'hw_emulation_architecture=s390x'
+ # 'hw_machine_type=s390-ccw-virtio'
+ # 'hw_video_model=virtio'
+ #
+ # TODO(chateaulav) Further Work to be done:
+ # testing mips functionality while waiting on redhat libvirt
+ # patch https://listman.redhat.com/archives/libvir-list/
+ # 2016-May/msg00197.html
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1432101
+ emulation_arch = image_meta.properties.get("hw_emulation_architecture")
+ if emulation_arch:
+ arch = emulation_arch
+ else:
+ arch = libvirt_utils.get_arch(image_meta)
+
+ return arch
+
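
A sketch of how the emulation properties listed in the NOTE above reach this helper (assuming a Nova development environment; the property values mirror the aarch64 example in the comment):

from nova import objects

meta = objects.ImageMeta.from_dict({
    'properties': {
        'hw_emulation_architecture': 'aarch64',
        'hw_firmware_type': 'uefi',
        'hw_machine_type': 'virt',
    },
})
# _check_emulation_arch() would return 'aarch64' here, even on an
# x86_64 host, causing the guest to be emulated via qemu.
assert meta.properties.hw_emulation_architecture == 'aarch64'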
def _get_cpu_model_mapping(self, model):
"""Get the CPU model mapping
@@ -5158,7 +5416,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
- arch = libvirt_utils.get_arch(image_meta)
+ arch = self._check_emulation_arch(image_meta)
cpu = self._get_guest_cpu_model_config(flavor, arch)
if cpu is None:
@@ -5171,14 +5429,38 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
+ caps = self._host.get_capabilities()
+ if arch != caps.host.cpu.arch:
+ # Try emulating. Other arch configs will go here
+ cpu.mode = None
+ if arch == fields.Architecture.AARCH64:
+ cpu.model = "cortex-a57"
+ elif arch == fields.Architecture.PPC64LE:
+ cpu.model = "POWER8"
+ # TODO(chateaulav): re-evaluate when libvirtd adds overall
+ # RISC-V support as a supported architecture; as there are no
+ # CPU models associated, this simply associates X vCPUs with the
+ # guest according to the flavor. The same issue should be
+ # present with mipsel due to the same limitation, but this has
+ # not been tested.
+ elif arch == fields.Architecture.MIPSEL:
+ cpu = None
+
return cpu
def _get_guest_disk_config(
self, instance, name, disk_mapping, flavor, image_type=None,
boot_order=None,
):
+ # NOTE(artom) To pass unit tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ if image_type is None:
+ image_type = CONF.libvirt.images_type
disk_unit = None
- disk = self.image_backend.by_name(instance, name, image_type)
+ disk_info_mapping = disk_mapping[name]
+ disk = self.image_backend.by_name(
+ instance, name, image_type, disk_info_mapping=disk_info_mapping)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
@@ -5187,21 +5469,26 @@ class LibvirtDriver(driver.ComputeDriver):
# remove this fall back once we know all config drives are in rbd.
# NOTE(vladikr): make sure that the flat image exist, otherwise
# the image will be created after the domain definition.
- flat_disk = self.image_backend.by_name(instance, name, 'flat')
+ flat_disk = self.image_backend.by_name(
+ instance, name, 'flat', disk_info_mapping=disk_info_mapping)
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
- disk_info = disk_mapping[name]
- if 'unit' in disk_mapping and disk_info['bus'] == 'scsi':
+ # The 'unit' key is global to the disk_mapping (rather than for an
+ # individual disk) because it is used solely to track the incrementing
+ # unit number.
+ if 'unit' in disk_mapping and disk_info_mapping['bus'] == 'scsi':
disk_unit = disk_mapping['unit']
- disk_mapping['unit'] += 1 # Increments for the next disk added
+ disk_mapping['unit'] += 1 # Increments for the next disk
conf = disk.libvirt_info(
- disk_info, self.disk_cachemode, flavor['extra_specs'],
- disk_unit=disk_unit, boot_order=boot_order)
+ self.disk_cachemode, flavor['extra_specs'], disk_unit=disk_unit,
+ boot_order=boot_order)
return conf
- def _get_guest_fs_config(self, instance, name, image_type=None):
+ def _get_guest_fs_config(
+ self, instance, name, image_type=CONF.libvirt.images_type
+ ):
disk = self.image_backend.by_name(instance, name, image_type)
return disk.libvirt_fs_info("/", "ploop")
@@ -5514,15 +5801,11 @@ class LibvirtDriver(driver.ComputeDriver):
if not is_able or CONF.libvirt.virt_type not in ('lxc', 'kvm', 'qemu'):
return
- if guest.cputune is None:
- guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
- # Setting the default cpu.shares value to be a value
- # dependent on the number of vcpus
- guest.cputune.shares = 1024 * guest.vcpus
-
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
+ if guest.cputune is None:
+ guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
@@ -5857,7 +6140,7 @@ class LibvirtDriver(driver.ComputeDriver):
clk.add_timer(tmrtc)
hpet = image_meta.properties.get('hw_time_hpet', False)
- guestarch = libvirt_utils.get_arch(image_meta)
+ guestarch = self._check_emulation_arch(image_meta)
if guestarch in (fields.Architecture.I686,
fields.Architecture.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
@@ -5888,9 +6171,9 @@ class LibvirtDriver(driver.ComputeDriver):
image_meta.properties.get('img_hide_hypervisor_id'))
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
- guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureACPI())
if not CONF.workarounds.libvirt_disable_apic:
- guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureAPIC())
if CONF.libvirt.virt_type in ('qemu', 'kvm') and os_type == 'windows':
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
@@ -5902,6 +6185,15 @@ class LibvirtDriver(driver.ComputeDriver):
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
+ hv.vpindex = True
+ hv.runtime = True
+ hv.synic = True
+ hv.reset = True
+ hv.frequencies = True
+ hv.reenlightenment = True
+ hv.tlbflush = True
+ hv.ipi = True
+ hv.evmcs = True
# NOTE(kosamara): Spoofing the vendor_id aims to allow the nvidia
# driver to work on windows VMs. At the moment, the nvidia driver
@@ -5920,28 +6212,21 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt.virt_type in ("qemu", "kvm"):
# vmcoreinfo support is x86, ARM-only for now
- guestarch = libvirt_utils.get_arch(image_meta)
+ guestarch = self._check_emulation_arch(image_meta)
if guestarch in (
fields.Architecture.I686, fields.Architecture.X86_64,
fields.Architecture.AARCH64,
):
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureVMCoreInfo())
if hide_hypervisor_id:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureKvmHidden())
- # NOTE(sean-k-mooney): we validate that the image and flavor
- # cannot have conflicting values in the compute API
- # so we just use the values directly. If it is not set in
- # either the flavor or image pmu will be none and we should
- # not generate the element to allow qemu to decide if a vPMU
- # should be provided for backwards compatibility.
- pmu = (flavor.extra_specs.get('hw:pmu') or
- image_meta.properties.get('hw_pmu'))
+ pmu = hardware.get_pmu_constraint(flavor, image_meta)
if pmu is not None:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeaturePMU(pmu))
def _check_number_of_serial_console(self, num_ports):
@@ -5958,53 +6243,76 @@ class LibvirtDriver(driver.ComputeDriver):
def _add_video_driver(self, guest, image_meta, flavor):
video = vconfig.LibvirtConfigGuestVideo()
- # NOTE(ldbragst): The following logic sets the video.type
+ video.type = self._get_video_type(image_meta) or video.type
+ # Set video memory, only if the flavor's limit is set
+ video_ram = image_meta.properties.get('hw_video_ram', 0)
+ max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
+ if video_ram > max_vram:
+ raise exception.RequestedVRamTooHigh(req_vram=video_ram,
+ max_vram=max_vram)
+ if max_vram and video_ram:
+ video.vram = video_ram * units.Mi // units.Ki
+ guest.add_device(video)
+
+ # NOTE(sean-k-mooney): return the video device we added
+ # for simpler testing.
+ return video
+
+ def _get_video_type(
+ self,
+ image_meta: objects.ImageMeta,
+ ) -> ty.Optional[str]:
+ # NOTE(ldbragst): The following logic returns the video type
# depending on supported defaults given the architecture,
- # virtualization type, and features. The video.type attribute can
+ # virtualization type, and features. The video type can
# be overridden by the user with image_meta.properties, which
- # is carried out in the next if statement below this one.
- guestarch = libvirt_utils.get_arch(image_meta)
+ # is carried out first.
+ if image_meta.properties.get('hw_video_model'):
+ video_type = image_meta.properties.hw_video_model
+ if not self._video_model_supported(video_type):
+ raise exception.InvalidVideoMode(model=video_type)
+ return video_type
+
+ guestarch = self._check_emulation_arch(image_meta)
if CONF.libvirt.virt_type == 'parallels':
- video.type = 'vga'
+ return 'vga'
+
# NOTE(kchamart): 'virtio' is a sensible default whether or not
# the guest has the native kernel driver (called "virtio-gpu" in
# Linux) -- i.e. if the guest has the VirtIO GPU driver, it'll
# be used; otherwise, the 'virtio' model will gracefully
# fall back to VGA compatibility mode.
- elif (guestarch in (fields.Architecture.I686,
- fields.Architecture.X86_64) and not
- CONF.spice.enabled):
- video.type = 'virtio'
- elif guestarch in (fields.Architecture.PPC,
- fields.Architecture.PPC64,
- fields.Architecture.PPC64LE):
+ if (
+ guestarch in (
+ fields.Architecture.I686,
+ fields.Architecture.X86_64
+ ) and not CONF.spice.enabled
+ ):
+ return 'virtio'
+
+ if (
+ guestarch in (
+ fields.Architecture.PPC,
+ fields.Architecture.PPC64,
+ fields.Architecture.PPC64LE
+ )
+ ):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
- video.type = 'vga'
- elif guestarch == fields.Architecture.AARCH64:
+ return 'vga'
+
+ if guestarch == fields.Architecture.AARCH64:
# NOTE(kevinz): Only virtio device type is supported by AARCH64
# so use 'virtio' instead when running on AArch64 hardware.
- video.type = 'virtio'
+ return 'virtio'
+ elif guestarch == fields.Architecture.MIPSEL:
+ return 'virtio'
elif CONF.spice.enabled:
- video.type = 'qxl'
- if image_meta.properties.get('hw_video_model'):
- video.type = image_meta.properties.hw_video_model
- if not self._video_model_supported(video.type):
- raise exception.InvalidVideoMode(model=video.type)
-
- # Set video memory, only if the flavor's limit is set
- video_ram = image_meta.properties.get('hw_video_ram', 0)
- max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
- if video_ram > max_vram:
- raise exception.RequestedVRamTooHigh(req_vram=video_ram,
- max_vram=max_vram)
- if max_vram and video_ram:
- video.vram = video_ram * units.Mi // units.Ki
- guest.add_device(video)
+ return 'qxl'
- # NOTE(sean-k-mooney): return the video device we added
- # for simpler testing.
- return video
+ # NOTE(lyarwood): Return None and fall back to the default of
+ # LibvirtConfigGuestVideo.type, which is currently virtio
+ return None
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
@@ -6135,6 +6443,11 @@ class LibvirtDriver(driver.ComputeDriver):
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
+ if hardware.get_locked_memory_constraint(flavor, image_meta):
+ if not membacking:
+ membacking = vconfig.LibvirtConfigGuestMemoryBacking()
+ membacking.locked = True
+
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
@@ -6239,12 +6552,21 @@ class LibvirtDriver(driver.ComputeDriver):
flavor: 'objects.Flavor',
) -> None:
if CONF.libvirt.virt_type in ("kvm", "qemu"):
- arch = libvirt_utils.get_arch(image_meta)
+ caps = self._host.get_capabilities()
+ host_arch = caps.host.cpu.arch
+ arch = self._check_emulation_arch(image_meta)
+ guest.os_arch = self._check_emulation_arch(image_meta)
+ if arch != host_arch:
+ # If emulating, downgrade to qemu
+ guest.virt_type = "qemu"
+
if arch in (fields.Architecture.I686, fields.Architecture.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
mach_type = libvirt_utils.get_machine_type(image_meta)
+ self._host._check_machine_type(caps, mach_type)
+
guest.os_mach_type = mach_type
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
@@ -6292,9 +6614,10 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_loader_secure = False
try:
- loader, nvram_template = self._host.get_loader(
+ loader, nvram_template, requires_smm = (
+ self._host.get_loader(
arch, mach_type,
- has_secure_boot=guest.os_loader_secure)
+ has_secure_boot=guest.os_loader_secure))
except exception.UEFINotSupported as exc:
if guest.os_loader_secure:
# we raise a specific exception if we requested secure
@@ -6306,6 +6629,11 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_loader_type = 'pflash'
guest.os_nvram_template = nvram_template
+ # if the feature set says we need SMM then enable it
+ if requires_smm:
+ guest.features.append(
+ vconfig.LibvirtConfigGuestFeatureSMM())
+
# NOTE(lyarwood): If the machine type isn't recorded in the stashed
# image metadata then record it through the system metadata table.
# This will allow the host configuration to change in the future
@@ -6380,13 +6708,25 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_consoles_qemu_kvm(
guest_cfg, instance, flavor, image_meta)
- def _is_s390x_guest(self, image_meta):
- s390x_archs = (fields.Architecture.S390, fields.Architecture.S390X)
- return libvirt_utils.get_arch(image_meta) in s390x_archs
+ def _is_mipsel_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.MIPSEL, fields.Architecture.MIPS64EL)
+ return self._check_emulation_arch(image_meta) in archs
+
+ def _is_s390x_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.S390, fields.Architecture.S390X)
+ return self._check_emulation_arch(image_meta) in archs
- def _is_ppc64_guest(self, image_meta):
+ def _is_ppc64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
archs = (fields.Architecture.PPC64, fields.Architecture.PPC64LE)
- return libvirt_utils.get_arch(image_meta) in archs
+ return self._check_emulation_arch(image_meta) in archs
+
+ def _is_aarch64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ arch = fields.Architecture.AARCH64
+ return self._check_emulation_arch(image_meta) == arch
+
+ def _is_x86_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.I686, fields.Architecture.X86_64)
+ return self._check_emulation_arch(image_meta) in archs
def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor,
image_meta):
@@ -6555,7 +6895,19 @@ class LibvirtDriver(driver.ComputeDriver):
# controller (x86 gets one by default)
usbhost.model = None
if not self._guest_needs_usb(guest, image_meta):
- usbhost.model = 'none'
+ archs = (
+ fields.Architecture.PPC,
+ fields.Architecture.PPC64,
+ fields.Architecture.PPC64LE,
+ )
+ if self._check_emulation_arch(image_meta) in archs:
+ # NOTE(chateaulav): testing and implementation showed that ppc
+ # needs None here, as that removes the controller from the domain
+ # xml entirely, whereas 'none' adds it but then disables it,
+ # causing libvirt errors and instances failing to build
+ usbhost.model = None
+ else:
+ usbhost.model = 'none'
guest.add_device(usbhost)
def _guest_add_pcie_root_ports(self, guest):
@@ -6581,38 +6933,28 @@ class LibvirtDriver(driver.ComputeDriver):
"""
caps = self._host.get_capabilities()
- # TODO(kchamart) In the third 'if' conditional below, for 'x86'
- # arch, we're assuming: when 'os_mach_type' is 'None', you'll
- # have "pc" machine type. That assumption, although it is
- # correct for the "forseeable future", it will be invalid when
- # libvirt / QEMU changes the default machine types.
- #
- # From libvirt 4.7.0 onwards (September 2018), it will ensure
- # that *if* 'pc' is available, it will be used as the default --
- # to not break existing applications. (Refer:
- # https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
- # --"qemu: ensure default machine types don't change if QEMU
- # changes").
- #
- # But even if libvirt (>=v4.7.0) handled the default case,
- # relying on such assumptions is not robust. Instead we should
- # get the default machine type for a given architecture reliably
- # -- by Nova setting it explicitly (we already do it for Arm /
- # AArch64 & s390x). A part of this bug is being tracked here:
- # https://bugs.launchpad.net/nova/+bug/1780138).
-
# Add PCIe root port controllers for PCI Express machines
# but only if their amount is configured
if not CONF.libvirt.num_pcie_ports:
return False
- if (caps.host.cpu.arch == fields.Architecture.AARCH64 and
- guest.os_mach_type.startswith('virt')):
+
+ # Only certain architectures and machine types can handle PCIe ports;
+ # the latter will be handled by libvirt.utils.get_machine_type
+
+ if (
+ caps.host.cpu.arch == fields.Architecture.AARCH64 and
+ guest.os_mach_type.startswith('virt')
+ ):
return True
- if (caps.host.cpu.arch == fields.Architecture.X86_64 and
- guest.os_mach_type is not None and
- 'q35' in guest.os_mach_type):
+
+ if (
+ caps.host.cpu.arch == fields.Architecture.X86_64 and
+ guest.os_mach_type is not None and
+ 'q35' in guest.os_mach_type
+ ):
return True
+
return False
def _get_guest_config(self, instance, network_info, image_meta,
@@ -6763,6 +7105,8 @@ class LibvirtDriver(driver.ComputeDriver):
if vpmems:
self._guest_add_vpmems(guest, vpmems)
+ self._guest_add_iommu_device(guest, image_meta, flavor)
+
return guest
def _get_ordered_vpmems(self, instance, flavor):
@@ -6787,8 +7131,10 @@ class LibvirtDriver(driver.ComputeDriver):
size_kb = vpmem.size // units.Ki
align_kb = vpmem.align // units.Ki
- vpmem_config = vconfig.LibvirtConfigGuestVPMEM(
- devpath=vpmem.devpath, size_kb=size_kb, align_kb=align_kb)
+ vpmem_config = vconfig.LibvirtConfigGuestVPMEM()
+ vpmem_config.source_path = vpmem.devpath
+ vpmem_config.target_size = size_kb
+ vpmem_config.align_size = align_kb
# max memory size needs to contain the vpmem size
guest.max_memory_size += size_kb
@@ -6925,11 +7271,13 @@ class LibvirtDriver(driver.ComputeDriver):
def _guest_add_pci_devices(self, guest, instance):
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
# Get all generic PCI devices (non-SR-IOV).
- for pci_dev in pci_manager.get_instance_pci_devs(instance):
+ for pci_dev in instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
# PCI devices is only supported for QEMU/KVM hypervisor
- if pci_manager.get_instance_pci_devs(instance, 'all'):
+ if instance.get_pci_devices():
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type
)
@@ -6968,18 +7316,21 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
return add_video_driver
- def _guest_add_pointer_device(self, guest, image_meta):
- """Build the pointer device to add to the instance.
-
- The configuration is determined by examining the 'hw_input_bus' image
- metadata property, the 'hw_pointer_model' image metadata property, and
- the '[DEFAULT] pointer_model' config option in that order.
- """
+ def _get_pointer_bus_and_model(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: objects.ImageMeta,
+ ) -> ty.Tuple[ty.Optional[str], ty.Optional[str]]:
pointer_bus = image_meta.properties.get('hw_input_bus')
pointer_model = image_meta.properties.get('hw_pointer_model')
@@ -6993,7 +7344,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
# If the user hasn't requested anything and the host config says to
# use something other than a USB tablet, there's nothing to do
- return
+ return None, None
# For backward compatibility, we don't want to error out if the host
# configuration requests a USB tablet but the virtual machine mode is
@@ -7003,7 +7354,7 @@ class LibvirtDriver(driver.ComputeDriver):
'USB tablet requested for guests on non-HVM host; '
'in order to accept this request the machine mode should '
'be configured as HVM.')
- return
+ return None, None
# Ditto for using a USB tablet when the SPICE agent is enabled, since
# that has a paravirt mouse builtin which drastically reduces overhead;
@@ -7017,15 +7368,32 @@ class LibvirtDriver(driver.ComputeDriver):
'USB tablet requested for guests but the SPICE agent is '
'enabled; ignoring request in favour of default '
'configuration.')
- return
+ return None, None
- pointer = vconfig.LibvirtConfigGuestInput()
- pointer.type = pointer_model
- pointer.bus = pointer_bus
- guest.add_device(pointer)
+ return pointer_model, pointer_bus
- # returned for unit testing purposes
- return pointer
+ def _guest_add_pointer_device(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: objects.ImageMeta
+ ) -> ty.Optional[vconfig.LibvirtConfigGuestInput]:
+ """Build the pointer device to add to the instance.
+
+ The configuration is determined by examining the 'hw_input_bus' image
+ metadata property, the 'hw_pointer_model' image metadata property, and
+ the '[DEFAULT] pointer_model' config option in that order.
+ """
+ pointer_model, pointer_bus = self._get_pointer_bus_and_model(
+ guest, image_meta)
+
+ pointer = None
+ if pointer_model and pointer_bus:
+ pointer = vconfig.LibvirtConfigGuestInput()
+ pointer.type = pointer_model
+ pointer.bus = pointer_bus
+ guest.add_device(pointer)
+
+ # returned for unit testing purposes
+ return pointer
def _guest_add_keyboard_device(self, guest, image_meta):
"""Add keyboard for graphical console use."""
@@ -7037,7 +7405,7 @@ class LibvirtDriver(driver.ComputeDriver):
# libvirt will automatically add a PS2 keyboard)
# TODO(stephenfin): We might want to do this for other non-x86
# architectures
- arch = libvirt_utils.get_arch(image_meta)
+ arch = self._check_emulation_arch(image_meta)
if arch != fields.Architecture.AARCH64:
return None
@@ -7051,6 +7419,92 @@ class LibvirtDriver(driver.ComputeDriver):
# returned for unit testing purposes
return keyboard
+ def _get_iommu_model(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> ty.Optional[str]:
+ model = flavor.extra_specs.get(
+ 'hw:viommu_model') or image_meta.properties.get(
+ 'hw_viommu_model')
+ if not model:
+ return None
+
+ is_x86 = self._is_x86_guest(image_meta)
+ is_aarch64 = self._is_aarch64_guest(image_meta)
+
+ if is_x86:
+ if guest.os_mach_type is not None and not (
+ 'q35' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ elif is_aarch64:
+ if guest.os_mach_type is not None and not (
+ 'virt' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ else:
+ raise exception.InvalidVIOMMUArchitecture(
+ arch=self._check_emulation_arch(image_meta))
+
+ if model == fields.VIOMMUModel.AUTO:
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ model = fields.VIOMMUModel.VIRTIO
+ elif self._is_x86_guest(image_meta) and (
+ guest.os_mach_type is not None and 'q35' in guest.os_mach_type
+ ):
+ model = fields.VIOMMUModel.INTEL
+ else:
+ # AArch64
+ model = fields.VIOMMUModel.SMMUV3
+ return model
+
+ def _guest_add_iommu_device(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> None:
+ """Add a virtual IOMMU device to allow e.g. vfio-pci usage."""
+ if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
+ # vIOMMU requires QEMU
+ return
+
+ iommu = vconfig.LibvirtConfigGuestIOMMU()
+
+ iommu.model = self._get_iommu_model(guest, image_meta, flavor)
+ if iommu.model is None:
+ return
+
+ iommu.interrupt_remapping = True
+ iommu.caching_mode = True
+ iommu.iotlb = True
+
+ # As the values QEMU supports are 39 and 48, we set this to the
+ # larger width (48) by default and do not expose it to the end user.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_AW_BITS):
+ iommu.aw_bits = 48
+
+ if guest.os_mach_type is not None and 'q35' in guest.os_mach_type:
+ iommu.eim = True
+ else:
+ iommu.eim = False
+ guest.add_device(iommu)
+
+ ioapic = vconfig.LibvirtConfigGuestFeatureIOAPIC()
+ guest.add_feature(ioapic)
+
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None,
@@ -7182,7 +7636,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance: 'objects.Instance',
power_on: bool = True,
pause: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
) -> libvirt_guest.Guest:
"""Create a Guest from XML.
@@ -7208,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7230,7 +7685,8 @@ class LibvirtDriver(driver.ComputeDriver):
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
- for vif in network_info if vif.get('active', True) is False]
+ for vif in network_info if vif.get('active', True) is False and
+ vif['vnic_type'] != network_model.VNIC_TYPE_REMOTE_MANAGED]
def _create_guest_with_network(
self,
@@ -7241,7 +7697,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info: ty.Optional[ty.Dict[str, ty.Any]],
power_on: bool = True,
vifs_already_plugged: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
external_events: ty.Optional[ty.List[ty.Tuple[str, str]]] = None,
cleanup_instance_dir: bool = False,
cleanup_instance_disks: bool = False,
@@ -7273,17 +7729,9 @@ class LibvirtDriver(driver.ComputeDriver):
pause=pause, power_on=power_on,
post_xml_callback=post_xml_callback)
except eventlet.timeout.Timeout:
- # We never heard from Neutron
- LOG.warning(
- 'Timeout waiting for %(events)s for instance with '
- 'vm_state %(vm_state)s and task_state %(task_state)s',
- {
- 'events': events,
- 'vm_state': instance.vm_state,
- 'task_state': instance.task_state,
- },
- instance=instance)
-
+ # We did not receive all expected events from Neutron; a warning
+ # has already been logged by wait_for_instance_event, but we still
+ # need to decide whether the issue is fatal.
if CONF.vif_plugging_is_fatal:
# NOTE(stephenfin): don't worry, guest will be in scope since
# we can only hit this branch if the VIF plug timed out
@@ -7319,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
@@ -7530,15 +7981,7 @@ class LibvirtDriver(driver.ComputeDriver):
device_address = self._get_pci_id_from_libvirt_name(device_address)
if not device_address:
return
- try:
- return self.pgpu_type_mapping.get(device_address)
- except KeyError:
- LOG.warning("No mdev type was configured for PCI address: %s",
- device_address)
- # We accept to return None instead of raising an exception
- # because we prefer the callers to return the existing exceptions
- # in case we can't find a specific pGPU
- return
+ return self.pgpu_type_mapping.get(device_address)
def _get_resource_class_for_device(self, device_address):
"""Returns the resource class for the inventory of this device.
@@ -7738,13 +8181,33 @@ class LibvirtDriver(driver.ComputeDriver):
dev.name(): dev for dev in
self._host.list_all_devices(flags=dev_flags)
}
- net_devs = [dev for dev in devices.values() if "net" in dev.listCaps()]
+
+ # NOTE(mnaser): The listCaps() function can raise an exception if
+ # the device disappeared while we were looping. This
+ # helper returns an empty list rather than raising an
+ # exception, which will remove the device from Nova's
+ # resource tracker, but that is OK since the device
+ # disappeared.
+ def _safe_list_caps(dev):
+ try:
+ return dev.listCaps()
+ except libvirt.libvirtError:
+ return []
+
+ net_devs = [
+ dev for dev in devices.values() if "net" in _safe_list_caps(dev)
+ ]
vdpa_devs = [
- dev for dev in devices.values() if "vdpa" in dev.listCaps()
+ dev for dev in devices.values() if "vdpa" in _safe_list_caps(dev)
]
+ pci_devs = {
+ name: dev for name, dev in devices.items()
+ if "pci" in _safe_list_caps(dev)}
pci_info = [
- self._host._get_pcidev_info(name, dev, net_devs, vdpa_devs)
- for name, dev in devices.items() if "pci" in dev.listCaps()
+ self._host._get_pcidev_info(
+ name, dev, net_devs,
+ vdpa_devs, list(pci_devs.values())
+ )
+ for name, dev in pci_devs.items()
]
return jsonutils.dumps(pci_info)
@@ -7793,15 +8256,52 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
- virtdev = self._host.device_lookup_by_name(devname)
+ # LP #1951656 - In Libvirt 7.7, the mdev name now includes the PCI
+ # address of the parent device (e.g. mdev_<uuid>_<pci_address>)
+ # because mdevctl allows multiple mediated devices to be defined
+ # with the same UUID (only one can be active at a time). Since the
+ # guest information doesn't have the parent ID, try to look up
+ # which available mediated device matches the UUID. If multiple
+ # devices are found that match the UUID, then this is an error
+ # condition.
+ try:
+ virtdev = self._host.device_lookup_by_name(devname)
+ except libvirt.libvirtError as ex:
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_NODE_DEVICE:
+ raise
+ mdevs = [dev for dev in self._host.list_mediated_devices()
+ if dev.startswith(devname)]
+ # If no matching devices are found, simply raise the original
+ # exception indicating that no devices are found.
+ if not mdevs:
+ raise
+ elif len(mdevs) > 1:
+ msg = ("The mediated device name %(devname)s refers to a UUID "
+ "that is present in multiple libvirt mediated devices. "
+ "Matching libvirt mediated devices are %(devices)s. "
+ "Mediated device UUIDs must be unique for Nova." %
+ {'devname': devname,
+ 'devices': ', '.join(mdevs)})
+ raise exception.InvalidLibvirtMdevConfig(reason=msg)
+
+ LOG.debug('Found requested device %s as %s. Using that.',
+ devname, mdevs[0])
+ virtdev = self._host.device_lookup_by_name(mdevs[0])
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
+ # Starting with Libvirt 7.3, the uuid information is available in the
+ # node device information. If it's there, use that. Otherwise,
+ # fall back to the previous behavior of parsing the uuid from the
+ # devname.
+ if cfgdev.mdev_information.uuid:
+ mdev_uuid = cfgdev.mdev_information.uuid
+ else:
+ mdev_uuid = libvirt_utils.mdev_name2uuid(cfgdev.name)
device = {
"dev_id": cfgdev.name,
- # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
- "uuid": libvirt_utils.mdev_name2uuid(cfgdev.name),
+ "uuid": mdev_uuid,
# the physical GPU PCI device
"parent": cfgdev.parent,
"type": cfgdev.mdev_information.type,
@@ -7889,6 +8389,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -7904,6 +8405,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+ LOG.info('Available mdevs: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -7915,6 +8417,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -7926,6 +8429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying device: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -7935,7 +8439,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8018,6 +8528,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+ LOG.debug('No available mdevs were found. '
+ 'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8025,6 +8537,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -8501,6 +9014,7 @@ class LibvirtDriver(driver.ComputeDriver):
traits.update(self._get_storage_bus_traits())
traits.update(self._get_video_model_traits())
traits.update(self._get_vif_model_traits())
+ traits.update(self._get_iommu_model_traits())
traits.update(self._get_tpm_traits())
_, invalid_traits = ot.check_traits(traits)
@@ -9026,6 +9540,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9112,15 +9627,16 @@ class LibvirtDriver(driver.ComputeDriver):
disk_available_mb = (
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
- # Compare CPU
- try:
- if not instance.vcpu_model or not instance.vcpu_model.model:
- source_cpu_info = src_compute_info['cpu_info']
- self._compare_cpu(None, source_cpu_info, instance)
- else:
- self._compare_cpu(instance.vcpu_model, None, instance)
- except exception.InvalidCPUInfo as e:
- raise exception.MigrationPreCheckError(reason=e)
+ if not CONF.workarounds.skip_cpu_compare_on_dest:
+ # Compare CPU
+ try:
+ if not instance.vcpu_model or not instance.vcpu_model.model:
+ source_cpu_info = src_compute_info['cpu_info']
+ self._compare_cpu(None, source_cpu_info, instance)
+ else:
+ self._compare_cpu(instance.vcpu_model, None, instance)
+ except exception.InvalidCPUInfo as e:
+ raise exception.MigrationPreCheckError(reason=e)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file(instance)
@@ -9155,7 +9671,7 @@ class LibvirtDriver(driver.ComputeDriver):
# populate it if we are using multiple port bindings.
# TODO(stephenfin): Remove once we can do this unconditionally in X or
# later
- if self._network_api.supports_port_binding_extension(context):
+ if self._network_api.has_port_binding_extension(context):
data.vifs = (
migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
instance.get_network_info()))
@@ -9478,7 +9994,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -9555,7 +10071,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# 'dest' will be substituted into 'migration_uri' so ensure
- # it does't contain any characters that could be used to
+ # it doesn't contain any characters that could be used to
# exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
@@ -10192,10 +10708,13 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: the instance being migrated
:param migrate_date: a LibvirtLiveMigrateData object
"""
- network_info = network_model.NetworkInfo(
- [vif.source_vif for vif in migrate_data.vifs
- if "source_vif" in vif and vif.source_vif])
- self._reattach_instance_vifs(context, instance, network_info)
+ # NOTE(artom) migrate_data.vifs might not be set if our Neutron doesn't
+ # have the multiple port bindings extension.
+ if 'vifs' in migrate_data and migrate_data.vifs:
+ network_info = network_model.NetworkInfo(
+ [vif.source_vif for vif in migrate_data.vifs
+ if "source_vif" in vif and vif.source_vif])
+ self._reattach_instance_vifs(context, instance, network_info)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
@@ -10476,14 +10995,13 @@ class LibvirtDriver(driver.ComputeDriver):
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
- libvirt_utils.create_image(info['type'], instance_disk,
- info['virt_disk_size'])
+ libvirt_utils.create_image(
+ instance_disk, info['type'], info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
- disk = self.image_backend.by_name(instance, instance_disk,
- CONF.libvirt.images_type)
+ disk = self.image_backend.by_name(instance, instance_disk)
if cache_name.startswith('ephemeral'):
# The argument 'size' is used by image.cache to
# validate disk size retrieved from cache against
@@ -10559,16 +11077,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+ # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+ while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception('Error sending announce-self command')
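The loop above pauses only between attempts, never before the first one.
A standalone sketch of that pacing logic (illustrative, not part of this
patch):

    import time

    def paced_attempts(max_attempts: int, interval: float):
        for attempt in range(1, max_attempts + 1):
            if attempt > 1:
                # Pause only between attempts, as the loop above does.
                time.sleep(interval)
            yield attempt

    assert list(paced_attempts(3, 0)) == [1, 2, 3]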
def post_live_migration_at_destination(self, context,
instance,
@@ -10818,6 +11357,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
@@ -10973,6 +11515,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = self._get_instance_disk_info(instance, block_device_info)
try:
+ # If cleanup failed in previous resize attempts we try to remedy
+ # that before a resize is tried again
+ self._cleanup_failed_instance_base(inst_base_resize)
os.rename(inst_base, inst_base_resize)
# if we are migrating the instance with shared instance path then
# create the directory. If it is a remote node the directory
@@ -11196,9 +11741,9 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("finish_migration finished successfully.", instance=instance)
- def _cleanup_failed_migration(self, inst_base):
- """Make sure that a failed migrate doesn't prevent us from rolling
- back in a revert.
+ def _cleanup_failed_instance_base(self, inst_base):
+ """Make sure that a failed migrate or resize doesn't prevent us from
+ rolling back in a revert or retrying a resize.
"""
try:
shutil.rmtree(inst_base)
@@ -11254,7 +11799,7 @@ class LibvirtDriver(driver.ComputeDriver):
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
- self._cleanup_failed_migration(inst_base)
+ self._cleanup_failed_instance_base(inst_base)
os.rename(inst_base_resize, inst_base)
root_disk = self.image_backend.by_name(instance, 'disk')
@@ -11771,6 +12316,30 @@ class LibvirtDriver(driver.ComputeDriver):
in supported_models for model in all_models
}
+ def _get_iommu_model_traits(self) -> ty.Dict[str, bool]:
+ """Get iommu model traits based on the currently enabled virt_type.
+ Not all traits generated by this function may be valid and the result
+ should be validated.
+ :return: A dict of trait names mapped to boolean values.
+ """
+ dom_caps = self._host.get_domain_capabilities()
+ supported_models: ty.Set[str] = {fields.VIOMMUModel.AUTO}
+ # Our min versions of qemu/libvirt support the q35 and virt machine
+ # types. They also support the smmuv3 and intel iommu models, so if
+ # the qemu binary is available we can report the trait.
+ if fields.Architecture.AARCH64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.SMMUV3)
+ if fields.Architecture.X86_64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.INTEL)
+ # The virtio iommu model requires a newer libvirt than our min
+ # libvirt, so we need to check the version explicitly.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ supported_models.add(fields.VIOMMUModel.VIRTIO)
+ return {
+ f'COMPUTE_VIOMMU_MODEL_{model.replace("-", "_").upper()}': model
+ in supported_models for model in fields.VIOMMUModel.ALL
+ }
+
def _get_storage_bus_traits(self) -> ty.Dict[str, bool]:
"""Get storage bus traits based on the currently enabled virt_type.
diff --git a/nova/virt/libvirt/event.py b/nova/virt/libvirt/event.py
index a7d2a3624f..56951dc11c 100644
--- a/nova/virt/libvirt/event.py
+++ b/nova/virt/libvirt/event.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import typing as ty
+
from nova.virt import event
@@ -22,7 +24,10 @@ class LibvirtEvent(event.InstanceEvent):
class DeviceEvent(LibvirtEvent):
"""Base class for device related libvirt events"""
- def __init__(self, uuid: str, dev: str, timestamp: float = None):
+ def __init__(self,
+ uuid: str,
+ dev: str,
+ timestamp: ty.Optional[float] = None):
super().__init__(uuid, timestamp)
self.dev = dev
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 53080e41f0..c40c3c4a7f 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -254,8 +254,17 @@ class Guest(object):
"""
if cfg:
+ LOG.debug(f'Looking for interface given config: {cfg}')
interfaces = self.get_all_devices(
type(cfg), from_persistent_config)
+ if not interfaces:
+ LOG.debug(f'No interface of type: {type(cfg)} found in domain')
+ return None
+ # FIXME(sean-k-mooney): we should be able to print the list of
+ # interfaces; however, some tests use incomplete objects that can't
+ # be printed due to incomplete mocks or defects in the libvirt
+ # fixture. Let's address this later.
+ # LOG.debug(f'within interfaces: {list(interfaces)}')
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuest get from domain and
# LibvirtConfigGuest generated by
@@ -264,6 +273,16 @@ class Guest(object):
# equality check based on available information on nova side
if cfg == interface:
return interface
+ else:
+ # NOTE(sean-k-mooney): {list(interfaces)} could be used
+ # instead of self._domain.XMLDesc(0) once all tests have
+ # printable interfaces see the comment above ^.
+ # While the XML is more verbose it should always work
+ # for our current test suite and in production code.
+ LOG.debug(
+ f'interface for config: {cfg} '
+ f'not found in domain: {self._domain.XMLDesc(0)}'
+ )
return None
def get_vcpus_info(self):
@@ -533,7 +552,7 @@ class Guest(object):
:param no_metadata: Make snapshot without remembering it
:param disk_only: Disk snapshot, no system checkpoint
:param reuse_ext: Reuse any existing external files
- :param quiesce: Use QGA to quiece all mounted file systems
+ :param quiesce: Use QGA to quiesce all mounted file systems
"""
flags = no_metadata and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
@@ -655,6 +674,7 @@ class Guest(object):
stats = self._domain.jobStats()
return JobInfo(**stats)
except libvirt.libvirtError as ex:
+ errmsg = ex.get_error_message()
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
@@ -667,6 +687,12 @@ class Guest(object):
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
+ elif (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
+ errmsg and "migration was active, "
+ "but no RAM info was set" in errmsg):
+ LOG.debug("Migration is active or completed but "
+ "virDomainGetJobStats is missing ram: %s", ex)
+ return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 5c39dd320f..1ae86d9f47 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -31,6 +31,7 @@ from collections import defaultdict
import fnmatch
import glob
import inspect
+from lxml import etree
import operator
import os
import queue
@@ -46,6 +47,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
+from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import versionutils
@@ -64,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -136,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -488,7 +492,7 @@ class Host(object):
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
- self._event_thread.setDaemon(True)
+ self._event_thread.daemon = True
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
@@ -736,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+ return set(range(cpus))
+
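For context, libvirt's getCPUMap() returns a (cpu_count, per-CPU online
booleans, online_count) tuple, so the set of *present* CPUs is simply
range(cpu_count). A minimal sketch (illustrative, not part of this patch):

    # Example return value for a 4-CPU host with CPU 2 offline.
    cpus, cpu_map, online = 4, [True, True, False, True], 3
    assert set(range(cpus)) == {0, 1, 2, 3}  # offline CPU 2 is still present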
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
@@ -1057,6 +1069,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1197,6 +1215,25 @@ class Host(object):
stats["frequency"] = self._get_hardware_info()[3]
return stats
+ def _check_machine_type(self, caps, mach_type):
+ """Validate if hw machine type is in capabilities of the host
+
+ :param caps: host capabilities
+ :param mach_type: machine type
+ """
+ possible_machine_types = []
+
+ caps_tree = etree.fromstring(str(caps))
+ for guest in caps_tree.findall('guest'):
+ for machine in guest.xpath('arch/machine'):
+ possible_machine_types.append(machine.text)
+
+ if mach_type not in possible_machine_types:
+ raise exception.InvalidMachineType(
+ message="'%s' is not valid/supported machine type, "
+ "Supported machine types are: %s" % (
+ mach_type, possible_machine_types))
+
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
@@ -1229,12 +1266,65 @@ class Host(object):
cfgdev.parse_str(xmlstr)
return cfgdev.pci_capability.features
+ def _get_vf_parent_pci_vpd_info(
+ self,
+ vf_device: 'libvirt.virNodeDevice',
+ parent_pf_name: str,
+ candidate_devs: ty.List['libvirt.virNodeDevice']
+ ) -> ty.Optional[vconfig.LibvirtConfigNodeDeviceVpdCap]:
+ """Returns PCI VPD info of a parent device of a PCI VF.
+
+ :param vf_device: a VF device object to use for lookup.
+ :param str parent_pf_name: parent PF name formatted as pci_dddd_bb_ss_f
+ :param candidate_devs: devices that could be parent devs for the VF.
+ :returns: A VPD capability object of a parent device.
+ """
+ parent_dev = next(
+ (dev for dev in candidate_devs if dev.name() == parent_pf_name),
+ None
+ )
+ if parent_dev is None:
+ return None
+
+ xmlstr = parent_dev.XMLDesc(0)
+ cfgdev = vconfig.LibvirtConfigNodeDevice()
+ cfgdev.parse_str(xmlstr)
+ return cfgdev.pci_capability.vpd_capability
+
+ @staticmethod
+ def _get_vpd_card_serial_number(
+ dev: 'libvirt.virNodeDevice',
+ ) -> ty.Optional[ty.List[str]]:
+ """Returns a card serial number stored in PCI VPD (if present)."""
+ xmlstr = dev.XMLDesc(0)
+ cfgdev = vconfig.LibvirtConfigNodeDevice()
+ cfgdev.parse_str(xmlstr)
+ vpd_cap = cfgdev.pci_capability.vpd_capability
+ if not vpd_cap:
+ return None
+ return vpd_cap.card_serial_number
+
+ def _get_pf_details(self, device: dict, pci_address: str) -> dict:
+ if device.get('dev_type') != fields.PciDeviceType.SRIOV_PF:
+ return {}
+
+ try:
+ return {
+ 'mac_address': pci_utils.get_mac_by_pci_address(pci_address)
+ }
+ except exception.PciDeviceNotFoundById:
+ LOG.debug(
+ 'Cannot get MAC address of the PF %s. It is probably attached '
+ 'to a guest already', pci_address)
+ return {}
+
def _get_pcidev_info(
self,
devname: str,
dev: 'libvirt.virNodeDevice',
net_devs: ty.List['libvirt.virNodeDevice'],
vdpa_devs: ty.List['libvirt.virNodeDevice'],
+ pci_devs: ty.List['libvirt.virNodeDevice'],
) -> ty.Dict[str, ty.Union[str, dict]]:
"""Returns a dict of PCI device."""
@@ -1297,23 +1387,112 @@ class Host(object):
return {'dev_type': fields.PciDeviceType.STANDARD}
+ def _get_vpd_details(
+ device_dict: dict,
+ device: 'libvirt.virNodeDevice',
+ pci_devs: ty.List['libvirt.virNodeDevice']
+ ) -> ty.Dict[str, ty.Any]:
+ """Get information from PCI VPD (if present).
+
+ PCI/PCIe devices may include the optional VPD capability. It may
+ contain useful information such as a serial number uniquely
+ assigned at the factory.
+
+ If a device is a VF and it does not contain the VPD capability,
+ a parent device's VPD is used (if present) as a fallback to
+ retrieve the unique add-in card number. Whether a VF exposes
+ the VPD capability or not may be controlled via a vendor-specific
+ firmware setting.
+ """
+ vpd_info: ty.Dict[str, ty.Any] = {}
+ # At the time of writing only the serial number had a clear
+ # use-case. However, the set of fields may be extended.
+ card_serial_number = self._get_vpd_card_serial_number(device)
+
+ if (not card_serial_number and
+ device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF
+ ):
+ # Format the address of a physical function to use underscores
+ # since that's how Libvirt formats the <name> element content.
+ pf_addr = device_dict.get('parent_addr')
+ if not pf_addr:
+ LOG.warning("A VF device dict does not have a parent PF "
+ "address in it which is unexpected. Skipping "
+ "serial number retrieval")
+ return vpd_info
+
+ formatted_addr = pf_addr.replace('.', '_').replace(':', '_')
+ vpd_cap = self._get_vf_parent_pci_vpd_info(
+ device, f'pci_{formatted_addr}', pci_devs)
+ if vpd_cap is not None:
+ card_serial_number = vpd_cap.card_serial_number
+
+ if card_serial_number:
+ vpd_info = {'card_serial_number': card_serial_number}
+ return vpd_info
+
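A worked example of the address formatting used for the parent PF lookup
above (illustrative, not part of this patch):

    # Libvirt nodedev <name> elements use underscores instead of ':' and '.'
    pf_addr = '0000:82:00.0'
    formatted_addr = pf_addr.replace('.', '_').replace(':', '_')
    assert f'pci_{formatted_addr}' == 'pci_0000_82_00_0'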
+ def _get_sriov_netdev_details(
+ device_dict: dict,
+ device: 'libvirt.virNodeDevice',
+ ) -> ty.Dict[str, ty.Dict[str, ty.Any]]:
+ """Get SR-IOV related information"""
+ sriov_info: ty.Dict[str, ty.Any] = {}
+
+ if device_dict.get('dev_type') != fields.PciDeviceType.SRIOV_VF:
+ return sriov_info
+
+ pf_addr = device_dict['parent_addr']
+
+ # A netdev VF may be associated with a PF which does not have a
+ # netdev as described in LP #1915255.
+ try:
+ sriov_info.update({
+ 'pf_mac_address': pci_utils.get_mac_by_pci_address(pf_addr)
+ })
+ except exception.PciDeviceNotFoundById:
+ LOG.debug(f'Could not get a PF mac for {pf_addr}')
+ # For the purposes Nova currently uses this information for,
+ # both a PF MAC and a VF number are needed, so we return
+ # an empty dict if a PF MAC is not available.
+ return {}
+
+ vf_num = pci_utils.get_vf_num_by_pci_address(
+ device_dict['address'])
+
+ sriov_info.update({'vf_num': vf_num})
+ return sriov_info
+
def _get_device_capabilities(
device_dict: dict,
device: 'libvirt.virNodeDevice',
+ pci_devs: ty.List['libvirt.virNodeDevice'],
net_devs: ty.List['libvirt.virNodeDevice']
- ) -> ty.Dict[str, ty.Dict[str, ty.Any]]:
+ ) -> ty.Dict[str, ty.Any]:
"""Get PCI VF device's additional capabilities.
If a PCI device is a virtual function, this function reads the PCI
parent's network capabilities (must always be a NIC device) and
appends this information to the device's dictionary.
"""
- caps: ty.Dict[str, ty.Dict[str, ty.Any]] = {}
+ caps: ty.Dict[str, ty.Any] = {}
if device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF:
pcinet_info = self._get_pcinet_info(device, net_devs)
if pcinet_info:
- return {'capabilities': {'network': pcinet_info}}
+ caps['network'] = pcinet_info
+ # Only attempt to get SR-IOV details if a VF is a netdev
+ # because there are no use cases for other dev types yet.
+ sriov_caps = _get_sriov_netdev_details(device_dict, dev)
+ if sriov_caps:
+ caps['sriov'] = sriov_caps
+
+ vpd_info = _get_vpd_details(device_dict, device, pci_devs)
+ if vpd_info:
+ caps['vpd'] = vpd_info
+
+ if caps:
+ return {'capabilities': caps}
+
return caps
xmlstr = dev.XMLDesc(0)
@@ -1339,7 +1518,9 @@ class Host(object):
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(
_get_device_type(cfgdev, address, dev, net_devs, vdpa_devs))
- device.update(_get_device_capabilities(device, dev, net_devs))
+ device.update(_get_device_capabilities(device, dev,
+ pci_devs, net_devs))
+ device.update(self._get_pf_details(device, address))
return device
def get_vdpa_nodedev_by_address(
@@ -1361,7 +1542,7 @@ class Host(object):
vdpa_devs = [
dev for dev in devices.values() if "vdpa" in dev.listCaps()]
pci_info = [
- self._get_pcidev_info(name, dev, [], vdpa_devs) for name, dev
+ self._get_pcidev_info(name, dev, [], vdpa_devs, []) for name, dev
in devices.items() if "pci" in dev.listCaps()]
parent_dev = next(
dev for dev in pci_info if dev['address'] == pci_address)
@@ -1401,7 +1582,7 @@ class Host(object):
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
- :returns: a list of virNodeDevice instance
+ :returns: a list of strings with the names of the mediated devices
"""
return self._list_devices("mdev", flags=flags)
@@ -1440,21 +1621,66 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Except for @xmlDesc, all other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
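An illustrative call (not part of this patch), assuming an open libvirt
connection; compareHypervisorCPU takes (emulator, arch, machine, virttype,
xmlCPU, flags) and everything except the CPU XML may be None:

    import libvirt

    conn = libvirt.open('qemu:///system')
    cpu_xml = "<cpu><model>qemu64</model></cpu>"
    ret = conn.compareHypervisorCPU(None, None, None, None, cpu_xml, 0)
    # ret is VIR_CPU_COMPARE_INCOMPATIBLE (0), _IDENTICAL (1) or
    # _SUPERSET (2); with some flags incompatibility raises libvirtError.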
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
+ return self._has_cgroupsv1_cpu_controller() or \
+ self._has_cgroupsv2_cpu_controller()
+
+ def _has_cgroupsv1_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V1...")
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
+ return True
+ LOG.debug("CPU controller missing on host.")
+ return False
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V1. "
+ "Deemed host to be missing controller by this approach.")
+ return False
+
+ def _has_cgroupsv2_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V2...")
+ try:
+ with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+ for line in fd.readlines():
+ bits = line.split()
+ if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
return True
+ LOG.debug("CPU controller missing on host.")
return False
- except IOError:
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V2. "
+ "Deemed host to be missing controller by this approach.")
return False
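For reference, the cgroups v2 check above reads a single space-separated
line; a minimal sketch of what it sees on a typical v2 host (illustrative,
not part of this patch):

    # Typical content of /sys/fs/cgroup/cgroup.controllers on a v2 host.
    line = 'cpuset cpu io memory hugetlb pids'
    assert 'cpu' in line.split()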
def get_canonical_machine_type(self, arch, machine) -> str:
@@ -1570,9 +1796,9 @@ class Host(object):
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
- contents = f.read()
- LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
- return contents == "1\n"
+ content = f.read()
+ LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, content)
+ return strutils.bool_from_string(content)
@property
def supports_amd_sev(self) -> bool:
@@ -1616,6 +1842,23 @@ class Host(object):
return self._supports_amd_sev
@property
+ def supports_remote_managed_ports(self) -> bool:
+ """Determine if the host supports remote managed ports.
+
+ Returns a boolean indicating whether remote managed ports are
+ possible to use on this host.
+
+ The check is based on the Libvirt version which added support for
+ parsing and exposing PCI VPD, since the use of remote managed
+ ports depends on a card serial number (if present in the VPD).
+ https://libvirt.org/news.html#v7-9-0-2021-11-01
+
+ The actual presence of a card serial number for a particular device
+ is meant to be checked elsewhere.
+ """
+ return self.has_min_version(lv_ver=(7, 9, 0))
+
+ @property
def loaders(self) -> ty.List[dict]:
"""Retrieve details of loader configuration for the host.
@@ -1642,11 +1885,11 @@ class Host(object):
arch: str,
machine: str,
has_secure_boot: bool,
- ) -> ty.Tuple[str, str]:
+ ) -> ty.Tuple[str, str, bool]:
"""Get loader for the specified architecture and machine type.
- :returns: A tuple of the bootloader executable path and the NVRAM
- template path.
+ :returns: A tuple of the bootloader executable path, the NVRAM
+ template path and a bool indicating if we need to enable SMM.
"""
machine = self.get_canonical_machine_type(arch, machine)
@@ -1676,6 +1919,7 @@ class Host(object):
return (
loader['mapping']['executable']['filename'],
loader['mapping']['nvram-template']['filename'],
+ 'requires-smm' in loader['features'],
)
raise exception.UEFINotSupported()
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 08bad69489..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,14 +81,24 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
-
- def __init__(self, path, source_type, driver_format, is_block_dev=False):
+ SUPPORTS_LUKS = False
+
+ def __init__(
+ self,
+ path,
+ source_type,
+ driver_format,
+ is_block_dev=False,
+ disk_info_mapping=None
+ ):
"""Image initialization.
:param path: libvirt's representation of the path of this disk.
:param source_type: block or file
:param driver_format: raw or qcow2
:param is_block_dev:
+ :param disk_info_mapping: disk_info['mapping'][device] metadata
+ specific to this image generated by nova.virt.libvirt.blockinfo.
"""
if (CONF.ephemeral_storage_encryption.enabled and
not self._supports_encryption()):
@@ -105,6 +115,8 @@ class Image(metaclass=abc.ABCMeta):
self.is_block_dev = is_block_dev
self.preallocate = False
+ self.disk_info_mapping = disk_info_mapping
+
# NOTE(dripton): We store lines of json (path, disk_format) in this
# file, for some image types, to prevent attacks based on changing the
# disk_format.
@@ -145,22 +157,23 @@ class Image(metaclass=abc.ABCMeta):
pass
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None,
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
- disk_bus = disk_info['bus']
+ if self.disk_info_mapping is None:
+ raise AttributeError(
+ 'Image must have disk_info_mapping to call libvirt_info()')
+ disk_bus = self.disk_info_mapping['bus']
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.driver_io = self.driver_io
@@ -522,11 +535,16 @@ class Flat(Image):
when creating a disk from a qcow2 if force_raw_images is not set in config.
"""
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
self.disk_name = disk_name
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Flat, self).__init__(path, "file", "raw", is_block_dev=False)
+ super().__init__(
+ path, "file", "raw", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -568,15 +586,21 @@ class Flat(Image):
def copy_raw_image(base, target, size):
libvirt_utils.copy_image(base, target)
if size:
- image = imgmodel.LocalFileImage(target,
- self.driver_format)
- disk.extend(image, size)
+ self.resize_image(size)
generating = 'image_id' not in kwargs
if generating:
if not self.exists():
# Generating image in place
prepare_template(target=self.path, *args, **kwargs)
+
+ # NOTE(plibeau): extend the disk when the image is no longer
+ # accessible by the customer but the base image is still
+ # available on the source compute during the resize of the
+ # instance.
+ else:
+ if size:
+ self.resize_image(size)
else:
if not os.path.exists(base):
prepare_template(target=base, *args, **kwargs)
@@ -608,10 +632,15 @@ class Flat(Image):
class Qcow2(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Qcow2, self).__init__(path, "file", "qcow2", is_block_dev=False)
+ super().__init__(
+ path, "file", "qcow2", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -625,7 +654,8 @@ class Qcow2(Image):
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def create_qcow2_image(base, target, size):
- libvirt_utils.create_cow_image(base, target, size)
+ libvirt_utils.create_image(
+ target, 'qcow2', size, backing_file=base)
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
@@ -689,7 +719,10 @@ class Lvm(Image):
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None,
+ disk_info_mapping=None
+ ):
self.ephemeral_key_uuid = instance.get('ephemeral_key_uuid')
if self.ephemeral_key_uuid is not None:
@@ -718,7 +751,10 @@ class Lvm(Image):
self.lv_path = os.path.join('/dev', self.vg, self.lv)
path = '/dev/mapper/' + dmcrypt.volume_name(self.lv)
- super(Lvm, self).__init__(path, "block", "raw", is_block_dev=True)
+ super(Lvm, self).__init__(
+ path, "block", "raw", is_block_dev=True,
+ disk_info_mapping=disk_info_mapping
+ )
# TODO(sbauza): Remove the config option usage and default the
# LVM logical volume creation to preallocate the full size only.
@@ -826,7 +862,9 @@ class Rbd(Image):
SUPPORTS_CLONE = True
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
if not CONF.libvirt.images_rbd_pool:
raise RuntimeError(_('You should specify'
' images_rbd_pool'
@@ -848,31 +886,32 @@ class Rbd(Image):
if self.driver.ceph_conf:
path += ':conf=' + self.driver.ceph_conf
- super(Rbd, self).__init__(path, "block", "rbd", is_block_dev=False)
+ super().__init__(
+ path, "block", "rbd", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.discard_mode = CONF.libvirt.hw_disk_discard
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
info = vconfig.LibvirtConfigGuestDisk()
- disk_bus = disk_info['bus']
+ disk_bus = self.disk_info_mapping['bus']
hosts, ports = self.driver.get_mon_addrs()
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.source_type = 'network'
info.source_protocol = 'rbd'
info.source_name = '%s/%s' % (self.driver.pool, self.rbd_name)
@@ -1189,10 +1228,15 @@ class Rbd(Image):
class Ploop(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Ploop, self).__init__(path, "file", "ploop", is_block_dev=False)
+ super().__init__(
+ path, "file", "ploop", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.resolve_driver_format()
@@ -1295,18 +1339,25 @@ class Backend(object):
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
- def by_name(self, instance, name, image_type=None):
+ def by_name(self, instance, name, image_type=None, disk_info_mapping=None):
"""Return an Image object for a disk with the given name.
:param instance: the instance which owns this disk
:param name: The name of the disk
:param image_type: (Optional) Image type.
Default is CONF.libvirt.images_type.
+ :param disk_info_mapping: (Optional) Disk info mapping dict
:return: An Image object for the disk with given name and instance.
:rtype: Image
"""
+ # NOTE(artom) To pass functional tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ image_type = image_type or CONF.libvirt.images_type
backend = self.backend(image_type)
- return backend(instance=instance, disk_name=name)
+ return backend(
+ instance=instance, disk_name=name,
+ disk_info_mapping=disk_info_mapping)
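A hedged usage sketch of the new flow; the backend, instance and mapping
values below are illustrative, not part of this change:

    # Mapping as produced by nova.virt.libvirt.blockinfo
    disk_mapping = {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
    image = backend.by_name(
        instance, 'disk', disk_info_mapping=disk_mapping)
    # libvirt_info() now reads the mapping from the image itself
    conf = image.libvirt_info(
        cache_mode='none', extra_specs=instance.flavor.extra_specs)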
def by_libvirt_path(self, instance, path, image_type=None):
"""Return an Image object for a disk with the given libvirt path.
diff --git a/nova/virt/libvirt/migration.py b/nova/virt/libvirt/migration.py
index 8cea9f2983..4726111a76 100644
--- a/nova/virt/libvirt/migration.py
+++ b/nova/virt/libvirt/migration.py
@@ -62,6 +62,7 @@ def get_updated_guest_xml(instance, guest, migrate_data, get_volume_config,
xml_doc, migrate_data, instance, get_volume_config)
xml_doc = _update_perf_events_xml(xml_doc, migrate_data)
xml_doc = _update_memory_backing_xml(xml_doc, migrate_data)
+ xml_doc = _update_quota_xml(instance, xml_doc)
if get_vif_config is not None:
xml_doc = _update_vif_xml(xml_doc, migrate_data, get_vif_config)
if 'dst_numa_info' in migrate_data:
@@ -71,6 +72,18 @@ def get_updated_guest_xml(instance, guest, migrate_data, get_volume_config,
return etree.tostring(xml_doc, encoding='unicode')
+def _update_quota_xml(instance, xml_doc):
+ flavor_shares = instance.flavor.extra_specs.get('quota:cpu_shares')
+ cputune = xml_doc.find('./cputune')
+ shares = xml_doc.find('./cputune/shares')
+ if shares is not None and not flavor_shares:
+ cputune.remove(shares)
+ # Remove the cputune element entirely if it has no children left.
+ if cputune is not None and not list(cputune):
+ xml_doc.remove(cputune)
+ return xml_doc
+
+
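A minimal sketch of the effect (the lxml input and the instance, whose
flavor carries no quota:cpu_shares, are assumed for illustration):

    from lxml import etree

    xml_doc = etree.fromstring(
        '<domain><cputune><shares>2048</shares></cputune></domain>')
    xml_doc = _update_quota_xml(instance, xml_doc)
    etree.tostring(xml_doc)  # b'<domain/>': stale shares dropped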
def _update_device_resources_xml(xml_doc, new_resources):
vpmems = []
for resource in new_resources:
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index da2a6e8b8a..e1298ee5c8 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -110,55 +111,99 @@ VTPM_DIR = '/var/lib/libvirt/swtpm/'
def create_image(
- disk_format: str, path: str, size: ty.Union[str, int],
+ path: str,
+ disk_format: str,
+ disk_size: ty.Optional[ty.Union[str, int]],
+ backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
- """Create a disk image
-
- :param disk_format: Disk image format (as known by qemu-img)
+ """Disk image creation with qemu-img
:param path: Desired location of the disk image
- :param size: Desired size of disk image. May be given as an int or
- a string. If given as an int, it will be interpreted
- as bytes. If it's a string, it should consist of a number
- with an optional suffix ('K' for Kibibytes,
- M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
- If no suffix is given, it will be interpreted as bytes.
+ :param disk_format: Disk image format (as known by qemu-img)
+ :param disk_size: Desired size of disk image. May be given as an int or
+ a string. If given as an int, it will be interpreted as bytes. If it's
+ a string, it should consist of a number with an optional suffix ('K'
+ for Kibibytes, M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
+ If no suffix is given, it will be interpreted as bytes.
+ Can be None in the case of a COW image.
+ :param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- processutils.execute('qemu-img', 'create', '-f', disk_format, path, size)
-
+ cmd = [
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
+ ]
-def create_cow_image(
- backing_file: ty.Optional[str], path: str, size: ty.Optional[int] = None,
-) -> None:
- """Create COW image
-
- Creates a COW image with the given backing file
-
- :param backing_file: Existing image on which to base the COW image
- :param path: Desired location of the COW image
- """
- base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += ['backing_file=%s' % backing_file]
- cow_opts += ['backing_fmt=%s' % base_details.file_format]
- else:
- base_details = None
- # Explicitly inherit the value of 'cluster_size' property of a qcow2
- # overlay image from its backing file. This can be useful in cases
- # when people create a base image with a non-default 'cluster_size'
- # value or cases when images were created with very old QEMU
- # versions which had a different default 'cluster_size'.
- if base_details and base_details.cluster_size is not None:
- cow_opts += ['cluster_size=%s' % base_details.cluster_size]
- if size is not None:
- cow_opts += ['size=%s' % size]
- if cow_opts:
+ cow_opts = [
+ f'backing_file={backing_file}',
+ f'backing_fmt={base_details.file_format}'
+ ]
+ # Explicitly inherit the value of 'cluster_size' property of a qcow2
+ # overlay image from its backing file. This can be useful in cases when
+ # people create a base image with a non-default 'cluster_size' value or
+ # cases when images were created with very old QEMU versions which had
+ # a different default 'cluster_size'.
+ if base_details.cluster_size is not None:
+ cow_opts += [f'cluster_size={base_details.cluster_size}']
+
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
- cmd = base_cmd + cow_opts + [path]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk; we can't .close() here as
+ # that would remove the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
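An illustrative call of the reworked helper (paths and passphrase are
invented; 'secret' and 'format' are the dict keys the code above reads):

    create_image(
        '/var/lib/nova/instances/someuuid/disk', 'qcow2', '10G',
        backing_file='/var/lib/nova/instances/_base/somebase',
        encryption={'secret': 'passphrase', 'format': 'luks'})

    # disk_size may be None for an overlay sized by its backing file.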
def create_ploop_image(
@@ -216,8 +261,8 @@ def copy_image(
dest: str,
host: ty.Optional[str] = None,
receive: bool = False,
- on_execute: ty.Callable = None,
- on_completion: ty.Callable = None,
+ on_execute: ty.Optional[ty.Callable] = None,
+ on_completion: ty.Optional[ty.Callable] = None,
compression: bool = True,
) -> None:
"""Copy a disk image to an existing directory
@@ -526,6 +571,9 @@ def get_cpu_model_from_arch(arch: str) -> str:
mode = 'qemu32'
elif arch == obj_fields.Architecture.PPC64LE:
mode = 'POWER8'
+ # TODO(chateaulav): Testing of emulated archs ongoing
+ # elif arch == obj_fields.Architecture.MIPSEL:
+ # mode = '24Kf-mips-cpu'
# NOTE(kevinz): In aarch64, cpu model 'max' will offer the capabilities
# that all the stuff it can currently emulate, both for "TCG" and "KVM"
elif arch == obj_fields.Architecture.AARCH64:
@@ -568,6 +616,7 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
default_mtypes = {
obj_fields.Architecture.ARMV7: "virt",
obj_fields.Architecture.AARCH64: "virt",
+ obj_fields.Architecture.PPC64LE: "pseries",
obj_fields.Architecture.S390: "s390-ccw-virtio",
obj_fields.Architecture.S390X: "s390-ccw-virtio",
obj_fields.Architecture.I686: "pc",
@@ -577,17 +626,31 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
def mdev_name2uuid(mdev_name: str) -> str:
- """Convert an mdev name (of the form mdev_<uuid_with_underscores>) to a
- uuid (of the form 8-4-4-4-12).
+ """Convert an mdev name (of the form mdev_<uuid_with_underscores> or
+ mdev_<uuid_with_underscores>_<pciaddress>) to a uuid
+ (of the form 8-4-4-4-12).
+
+ :param mdev_name: the name of the mdev to parse the UUID from
+ :returns: string containing the uuid
"""
- return str(uuid.UUID(mdev_name[5:].replace('_', '-')))
+ mdev_uuid = mdev_name[5:].replace('_', '-')
+ # Unconditionally remove the PCI address from the name
+ mdev_uuid = mdev_uuid[:36]
+ return str(uuid.UUID(mdev_uuid))
+
+def mdev_uuid2name(mdev_uuid: str, parent: ty.Optional[str] = None) -> str:
+ """Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
+ device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
-def mdev_uuid2name(mdev_uuid: str) -> str:
- """Convert an mdev uuid (of the form 8-4-4-4-12) to a name (of the form
- mdev_<uuid_with_underscores>).
+ :param mdev_uuid: the uuid of the mediated device
+ :param parent: the parent device id for the mediated device
+ :returns: name of the mdev to reference in libvirt
"""
- return "mdev_" + mdev_uuid.replace('-', '_')
+ name = "mdev_" + mdev_uuid.replace('-', '_')
+ if parent and parent.startswith('pci_'):
+ name = name + parent[4:]
+ return name
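Worked examples of the two helpers (UUID and PCI address invented):

    mdev_uuid2name('b38a3f43-4be2-4046-897f-b67c2f5e0140')
    # -> 'mdev_b38a3f43_4be2_4046_897f_b67c2f5e0140'

    mdev_name2uuid('mdev_b38a3f43_4be2_4046_897f_b67c2f5e0140_0000_06_00_0')
    # -> 'b38a3f43-4be2-4046-897f-b67c2f5e0140' (PCI suffix dropped)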
def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 85c83572e1..6a7daa6b54 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -623,7 +623,7 @@ class LibvirtGenericVIFDriver(object):
# 2. libvirt driver does not change mac address for macvtap VNICs
# or Alternatively does not rely on recreating libvirt's nodev
# name from the current mac address set on the netdevice.
- # See: virt.libvrit.driver.LibvirtDriver._get_pcinet_info
+ # See: virt.libvirt.driver.LibvirtDriver._get_pcinet_info
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
set_vf_interface_vlan(
vif['profile']['pci_slot'],
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..22c65e99c0 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -79,7 +79,6 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Extend the volume."""
LOG.debug("calling os-brick to extend FC Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
- LOG.debug("Extend FC Volume %s; new_size=%s",
- connection_info['data']['device_path'],
+ LOG.debug("Extend FC Volume: new_size=%s",
new_size, instance=instance)
return new_size
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
new file mode 100644
index 0000000000..d6d393994e
--- /dev/null
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2016-2020 Lightbits Labs Ltd.
+# Copyright (C) 2020 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova.conf
+from nova import utils
+from nova.virt.libvirt.volume import volume as libvirt_volume
+from os_brick import initiator
+from os_brick.initiator import connector
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+CONF = nova.conf.CONF
+
+
+class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
+ """Driver to attach NVMe volumes to libvirt."""
+ VERSION = '2.3.12'
+
+ def __init__(self, connection):
+ super(LibvirtLightOSVolumeDriver, self).__init__(connection)
+ self.connector = connector.InitiatorConnector.factory(
+ initiator.LIGHTOS,
+ root_helper=utils.get_root_helper(),
+ device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)
+
+ def connect_volume(self, connection_info, instance):
+ device_info = self.connector.connect_volume(connection_info['data'])
+ LOG.debug("Connecting NVMe volume with device_info %s", device_info)
+ connection_info['data']['device_path'] = device_info['path']
+
+ def disconnect_volume(self, connection_info, instance):
+ """Detach the volume from the instance."""
+ LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
+ connection_info.get("instance", ""),
+ connection_info.get("volume_id", ""))
+ self.connector.disconnect_volume(connection_info['data'], None)
+ super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
+ connection_info, instance)
+
+ def extend_volume(self, connection_info, instance, requested_size=None):
+ """Extend the volume."""
+ LOG.debug("calling os-brick to extend LightOS Volume."
+ "instance:%s, volume_id:%s",
+ connection_info.get("instance", ""),
+ connection_info.get("volume_id", ""))
+ new_size = self.connector.extend_volume(connection_info['data'])
+ LOG.debug("Extend LightOS Volume %s; new_size=%s",
+ connection_info['data']['device_path'], new_size)
+ return new_size
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index fefaaf434d..7436552812 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -33,6 +33,7 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
self.connector = connector.InitiatorConnector.factory(
initiator.NVME, utils.get_root_helper(),
+ use_multipath=CONF.libvirt.volume_use_multipath,
device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)
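For reference, an illustrative nova.conf fragment exercising the new
pass-through (both options already exist in the [libvirt] group):

    [libvirt]
    volume_use_multipath = True
    num_nvme_discover_tries = 5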
def connect_volume(self, connection_info, instance):
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 6ea91e2221..0ab3ddc4c1 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -263,12 +263,19 @@ def _get_eth_link(vif, ifc_num):
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
- 'mtu': vif['network']['meta'].get('mtu'),
+ 'mtu': _get_link_mtu(vif),
'ethernet_mac_address': vif.get('address'),
}
return link
+def _get_link_mtu(vif):
+ for subnet in vif['network']['subnets']:
+ if subnet['meta'].get('dhcp_server'):
+ return None
+ return vif['network']['meta'].get('mtu')
+
+
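A small sketch of the new behaviour (vif dict invented): the MTU is
omitted whenever any subnet runs DHCP, since the DHCP server is then
responsible for advertising the MTU:

    vif = {'network': {'meta': {'mtu': 1450},
                       'subnets': [{'meta': {'dhcp_server': '192.0.2.2'}}]}}
    _get_link_mtu(vif)  # -> None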
def _get_nets(vif, subnet, version, net_num, link_id):
"""Get networks for the given VIF and subnet
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
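A short usage sketch (behaviour as implemented above; file locations
depend on CONF.state_path and CONF.config_file):

    from nova.virt import node

    # First call: no compute_id file exists, so a UUID is generated,
    # persisted to CONF.state_path/compute_id and cached.
    node_uuid = node.get_local_node_uuid()

    # Subsequent calls short-circuit on the module-level cache.
    assert node.get_local_node_uuid() == node_uuid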
diff --git a/nova/virt/powervm/__init__.py b/nova/virt/powervm/__init__.py
deleted file mode 100644
index 9780cb4856..0000000000
--- a/nova/virt/powervm/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.virt.powervm import driver
-
-PowerVMDriver = driver.PowerVMDriver
diff --git a/nova/virt/powervm/disk/driver.py b/nova/virt/powervm/disk/driver.py
deleted file mode 100644
index 2ba083736e..0000000000
--- a/nova/virt/powervm/disk/driver.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import oslo_log.log as logging
-import pypowervm.const as pvm_const
-import pypowervm.tasks.scsi_mapper as tsk_map
-import pypowervm.util as pvm_u
-import pypowervm.wrappers.virtual_io_server as pvm_vios
-
-from nova import exception
-from nova.virt.powervm import mgmt
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-
-
-class DiskType(object):
- BOOT = 'boot'
- IMAGE = 'image'
-
-
-class IterableToFileAdapter(object):
- """A degenerate file-like so that an iterable can be read like a file.
-
- The Glance client returns an iterable, but PowerVM requires a file. This
- is the adapter between the two.
- """
-
- def __init__(self, iterable):
- self.iterator = iterable.__iter__()
- self.remaining_data = ''
-
- def read(self, size):
- chunk = self.remaining_data
- try:
- while not chunk:
- chunk = next(self.iterator)
- except StopIteration:
- return ''
- return_value = chunk[0:size]
- self.remaining_data = chunk[size:]
- return return_value
-
-
-class DiskAdapter(metaclass=abc.ABCMeta):
-
- capabilities = {
- 'shared_storage': False,
- 'has_imagecache': False,
- 'snapshot': False,
- }
-
- def __init__(self, adapter, host_uuid):
- """Initialize the DiskAdapter.
-
- :param adapter: The pypowervm adapter.
- :param host_uuid: The UUID of the PowerVM host.
- """
- self._adapter = adapter
- self._host_uuid = host_uuid
- self.mp_uuid = mgmt.mgmt_uuid(self._adapter)
-
- @abc.abstractproperty
- def _vios_uuids(self):
- """List the UUIDs of the Virtual I/O Servers hosting the storage."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _disk_match_func(self, disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method, with the
- following specification:
- def match_func(storage_elem)
- param storage_elem: A backing storage element wrapper (VOpt,
- VDisk, PV, or LU) to be analyzed.
- return: True if the storage_elem's mapping should be included;
- False otherwise.
- """
- raise NotImplementedError()
-
- def get_bootdisk_path(self, instance, vios_uuid):
- """Find the local path for the instance's boot disk.
-
- :param instance: nova.objects.instance.Instance object owning the
- requested disk.
- :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
- :return: Local path for instance's boot disk.
- """
- vm_uuid = vm.get_pvm_uuid(instance)
- match_func = self._disk_match_func(DiskType.BOOT, instance)
- vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
- xag=[pvm_const.XAG.VIO_SMAP])
- maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
- client_lpar_id=vm_uuid, match_func=match_func)
- if maps:
- return maps[0].server_adapter.backing_dev_name
- return None
-
- def _get_bootdisk_iter(self, instance):
- """Return an iterator of (storage_elem, VIOS) tuples for the instance.
-
- This method returns an iterator of (storage_elem, VIOS) tuples, where
- storage_element is a pypowervm storage element wrapper associated with
- the instance boot disk and VIOS is the wrapper of the Virtual I/O
- server owning that storage element.
-
- :param instance: nova.objects.instance.Instance object owning the
- requested disk.
- :return: Iterator of tuples of (storage_elem, VIOS).
- """
- lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
- match_func = self._disk_match_func(DiskType.BOOT, instance)
- for vios_uuid in self._vios_uuids:
- vios_wrap = pvm_vios.VIOS.get(
- self._adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
- for scsi_map in tsk_map.find_maps(
- vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id,
- match_func=match_func):
- yield scsi_map.backing_storage, vios_wrap
-
- def connect_instance_disk_to_mgmt(self, instance):
- """Connect an instance's boot disk to the management partition.
-
- :param instance: The instance whose boot disk is to be mapped.
- :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
- :return vios: The EntryWrapper of the VIOS from which the mapping was
- made.
- :raise InstanceDiskMappingFailed: If the mapping could not be done.
- """
- for stg_elem, vios in self._get_bootdisk_iter(instance):
- msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
-
- # Create a new mapping. NOTE: If there's an existing mapping on
- # the other VIOS but not this one, we'll create a second mapping
- # here. It would take an extreme sequence of events to get to that
- # point, and the second mapping would be harmless anyway. The
- # alternative would be always checking all VIOSes for existing
- # mappings, which increases the response time of the common case by
- # an entire GET of VIOS+VIO_SMAP.
- LOG.debug("Mapping boot disk %(disk_name)s to the management "
- "partition from Virtual I/O Server %(vios_name)s.",
- msg_args, instance=instance)
- try:
- tsk_map.add_vscsi_mapping(self._host_uuid, vios, self.mp_uuid,
- stg_elem)
- # If that worked, we're done. add_vscsi_mapping logged.
- return stg_elem, vios
- except Exception:
- LOG.exception("Failed to map boot disk %(disk_name)s to the "
- "management partition from Virtual I/O Server "
- "%(vios_name)s.", msg_args, instance=instance)
- # Try the next hit, if available.
- # We either didn't find the boot dev, or failed all attempts to map it.
- raise exception.InstanceDiskMappingFailed(instance_name=instance.name)
-
- @abc.abstractmethod
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- raise NotImplementedError()
-
- @abc.abstractproperty
- def capacity(self):
- """Capacity of the storage in gigabytes.
-
- Default is to make the capacity arbitrarily large.
- """
- raise NotImplementedError()
-
- @abc.abstractproperty
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used.
-
- Default is to say none of it is used.
- """
- raise NotImplementedError()
-
- @staticmethod
- def _get_disk_name(disk_type, instance, short=False):
- """Generate a name for a virtual disk associated with an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance for which the disk is to be created.
- :param short: If True, the generated name will be limited to 15
- characters (the limit for virtual disk). If False, it
- will be limited by the API (79 characters currently).
- :return: The sanitized file name for the disk.
- """
- prefix = '%s_' % (disk_type[0] if short else disk_type)
- base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short
- else instance.name)
- return pvm_u.sanitize_file_name_for_api(
- base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short
- else pvm_const.MaxLen.FILENAME_DEFAULT)
-
- @abc.abstractmethod
- def detach_disk(self, instance):
- """Detaches the storage adapters from the image disk.
-
- :param instance: instance to detach the image for.
- :return: A list of all the backing storage elements that were
- detached from the I/O Server and VM.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def delete_disks(self, storage_elems):
- """Removes the disks specified by the mappings.
-
- :param storage_elems: A list of the storage elements that are to be
- deleted. Derived from the return value from
- detach_disk.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a disk and copies the specified image to it.
-
- Cleans up created disk if an error occurs.
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param image_meta: nova.objects.ImageMeta object with the metadata of
- the image of the instance.
- :return: The backing pypowervm storage object that was created.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Attaches the disk image to the Virtual Machine.
-
- :param instance: nova instance to attach the disk to.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when this method is executed.
- """
- raise NotImplementedError()
diff --git a/nova/virt/powervm/disk/localdisk.py b/nova/virt/powervm/disk/localdisk.py
deleted file mode 100644
index e8d2ff4f46..0000000000
--- a/nova/virt/powervm/disk/localdisk.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import oslo_log.log as logging
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import conf
-from nova import exception
-from nova.image import glance
-from nova.virt.powervm.disk import driver as disk_dvr
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-CONF = conf.CONF
-IMAGE_API = glance.API()
-
-
-class LocalStorage(disk_dvr.DiskAdapter):
-
- def __init__(self, adapter, host_uuid):
- super(LocalStorage, self).__init__(adapter, host_uuid)
-
- self.capabilities = {
- 'shared_storage': False,
- 'has_imagecache': False,
- # NOTE(efried): 'snapshot' capability set dynamically below.
- }
-
- # Query to get the Volume Group UUID
- if not CONF.powervm.volume_group_name:
- raise exception.OptRequiredIfOtherOptValue(
- if_opt='disk_driver', if_value='localdisk',
- then_opt='volume_group_name')
- self.vg_name = CONF.powervm.volume_group_name
- vios_w, vg_w = tsk_stg.find_vg(adapter, self.vg_name)
- self._vios_uuid = vios_w.uuid
- self.vg_uuid = vg_w.uuid
- # Set the 'snapshot' capability dynamically. If we're hosting I/O on
- # the management partition, we can snapshot. If we're hosting I/O on
- # traditional VIOS, we are limited by the fact that a VSCSI device
- # can't be mapped to two partitions (the VIOS and the management) at
- # once.
- self.capabilities['snapshot'] = self.mp_uuid == self._vios_uuid
- LOG.info("Local Storage driver initialized: volume group: '%s'",
- self.vg_name)
-
- @property
- def _vios_uuids(self):
- """List the UUIDs of the Virtual I/O Servers hosting the storage.
-
- For localdisk, there's only one.
- """
- return [self._vios_uuid]
-
- @staticmethod
- def _disk_match_func(disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method.
- """
- disk_name = LocalStorage._get_disk_name(
- disk_type, instance, short=True)
- return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name])
-
- @property
- def capacity(self):
- """Capacity of the storage in gigabytes."""
- vg_wrap = self._get_vg_wrap()
- return float(vg_wrap.capacity)
-
- @property
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used."""
- vg_wrap = self._get_vg_wrap()
- # Subtract available from capacity
- return float(vg_wrap.capacity) - float(vg_wrap.available_size)
-
- def delete_disks(self, storage_elems):
- """Removes the specified disks.
-
- :param storage_elems: A list of the storage elements that are to be
- deleted. Derived from the return value from
- detach_disk.
- """
- # All of localdisk is done against the volume group. So reload
- # that (to get new etag) and then update against it.
- tsk_stg.rm_vg_storage(self._get_vg_wrap(), vdisks=storage_elems)
-
- def detach_disk(self, instance):
- """Detaches the storage adapters from the image disk.
-
- :param instance: Instance to disconnect the image for.
- :return: A list of all the backing storage elements that were
- disconnected from the I/O Server and VM.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # Build the match function
- match_func = tsk_map.gen_match_func(pvm_stg.VDisk)
-
- vios_w = pvm_vios.VIOS.get(
- self._adapter, uuid=self._vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
-
- # Remove the mappings.
- mappings = tsk_map.remove_maps(
- vios_w, lpar_uuid, match_func=match_func)
-
- # Update the VIOS with the removed mappings.
- vios_w.update()
-
- return [x.backing_storage for x in mappings]
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- tsk_map.remove_vdisk_mapping(self._adapter, vios_uuid, self.mp_uuid,
- disk_names=[disk_name])
- LOG.info("Unmapped boot disk %(disk_name)s from the management "
- "partition from Virtual I/O Server %(vios_name)s.",
- {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
- 'vios_name': vios_uuid})
-
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a disk and copies the specified image to it.
-
- Cleans up the created disk if an error occurs.
-
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param image_meta: The metadata of the image of the instance.
- :return: The backing pypowervm storage object that was created.
- """
- LOG.info('Create disk.', instance=instance)
-
- return self._upload_image(context, instance, image_meta)
-
- # TODO(esberglu): Copy vdisk when implementing image cache.
-
- def _upload_image(self, context, instance, image_meta):
- """Upload a new image.
-
- :param context: Nova context used to retrieve image from glance.
- :param image_meta: The metadata of the image of the instance.
- :return: The virtual disk containing the image.
- """
-
- img_name = self._get_disk_name(disk_dvr.DiskType.BOOT, instance,
- short=True)
-
- # TODO(esberglu) Add check for cached image when adding imagecache.
-
- return tsk_stg.upload_new_vdisk(
- self._adapter, self._vios_uuid, self.vg_uuid,
- disk_dvr.IterableToFileAdapter(
- IMAGE_API.download(context, image_meta.id)), img_name,
- image_meta.size, d_size=image_meta.size,
- upload_type=tsk_stg.UploadType.IO_STREAM,
- file_format=image_meta.disk_format)[0]
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Attaches the disk image to the Virtual Machine.
-
- :param instance: nova instance to connect the disk to.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: The pypowervm transaction FeedTask for the
- I/O Operations. The Virtual I/O Server mapping updates
- will be added to the FeedTask. This defers the updates
- to some later point in time.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- def add_func(vios_w):
- LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
- {'vios': vios_w.name}, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- self._host_uuid, vios_w, lpar_uuid, disk_info)
- return tsk_map.add_map(vios_w, mapping)
-
- stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func)
-
- def _get_vg_wrap(self):
- return pvm_stg.VG.get(self._adapter, uuid=self.vg_uuid,
- parent_type=pvm_vios.VIOS,
- parent_uuid=self._vios_uuid)
diff --git a/nova/virt/powervm/disk/ssp.py b/nova/virt/powervm/disk/ssp.py
deleted file mode 100644
index e7cdc9cf6c..0000000000
--- a/nova/virt/powervm/disk/ssp.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-
-import oslo_log.log as logging
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import cluster_ssp as tsk_cs
-from pypowervm.tasks import partition as tsk_par
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-import pypowervm.util as pvm_u
-import pypowervm.wrappers.cluster as pvm_clust
-import pypowervm.wrappers.storage as pvm_stg
-
-from nova import exception
-from nova.image import glance
-from nova.virt.powervm.disk import driver as disk_drv
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-
-IMAGE_API = glance.API()
-
-
-class SSPDiskAdapter(disk_drv.DiskAdapter):
- """Provides a disk adapter for Shared Storage Pools.
-
- Shared Storage Pools are a clustered file system technology that can link
- together Virtual I/O Servers.
-
- This adapter provides the connection for nova ephemeral storage (not
- Cinder) to connect to virtual machines.
- """
-
- capabilities = {
- 'shared_storage': True,
- # NOTE(efried): Whereas the SSP disk driver definitely does image
- # caching, it's not through the nova.virt.imagecache.ImageCacheManager
- # API. Setting `has_imagecache` to True here would have the side
- # effect of having a periodic task try to call this class's
- # manage_image_cache method (not implemented here; and a no-op in the
- # superclass) which would be harmless, but unnecessary.
- 'has_imagecache': False,
- 'snapshot': True,
- }
-
- def __init__(self, adapter, host_uuid):
- """Initialize the SSPDiskAdapter.
-
- :param adapter: pypowervm.adapter.Adapter for the PowerVM REST API.
- :param host_uuid: PowerVM UUID of the managed system.
- """
- super(SSPDiskAdapter, self).__init__(adapter, host_uuid)
-
- try:
- self._clust = pvm_clust.Cluster.get(self._adapter)[0]
- self._ssp = pvm_stg.SSP.get_by_href(
- self._adapter, self._clust.ssp_uri)
- self._tier = tsk_stg.default_tier_for_ssp(self._ssp)
- except pvm_exc.Error:
- LOG.exception("A unique PowerVM Cluster and Shared Storage Pool "
- "is required in the default Tier.")
- raise exception.NotFound()
-
- LOG.info(
- "SSP Storage driver initialized. Cluster '%(clust_name)s'; "
- "SSP '%(ssp_name)s'; Tier '%(tier_name)s'",
- {'clust_name': self._clust.name, 'ssp_name': self._ssp.name,
- 'tier_name': self._tier.name})
-
- @property
- def capacity(self):
- """Capacity of the storage in gigabytes."""
- # Retrieving the Tier is faster (because don't have to refresh LUs.)
- return float(self._tier.refresh().capacity)
-
- @property
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used."""
- self._ssp = self._ssp.refresh()
- return float(self._ssp.capacity) - float(self._ssp.free_space)
-
- def detach_disk(self, instance):
- """Detaches the storage adapters from the disk.
-
- :param instance: instance from which to detach the image.
- :return: A list of all the backing storage elements that were detached
- from the I/O Server and VM.
- """
- stg_ftsk = tsk_par.build_active_vio_feed_task(
- self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
-
- lpar_uuid = vm.get_pvm_uuid(instance)
- match_func = tsk_map.gen_match_func(pvm_stg.LU)
-
- def rm_func(vwrap):
- LOG.info("Removing SSP disk connection to VIOS %s.",
- vwrap.name, instance=instance)
- return tsk_map.remove_maps(vwrap, lpar_uuid,
- match_func=match_func)
-
- # Remove the mapping from *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self._host_uuid, else the PowerVM API will
- # fail.
- #
- # Note - this may not be all the VIOSes on the system...just the ones
- # in the SSP cluster.
- #
- # The mappings will normally be the same on all VIOSes, unless a VIOS
- # was down when a disk was added. So for the return value, we need to
- # collect the union of all relevant mappings from all VIOSes.
- lu_set = set()
- for vios_uuid in self._vios_uuids:
- # Add the remove for the VIO
- stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
-
- # Find the active LUs so that a delete op knows what to remove.
- vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
- mappings = tsk_map.find_maps(vios_w.scsi_mappings,
- client_lpar_id=lpar_uuid,
- match_func=match_func)
- if mappings:
- lu_set.update([x.backing_storage for x in mappings])
-
- stg_ftsk.execute()
-
- return list(lu_set)
-
- def delete_disks(self, storage_elems):
- """Removes the disks specified by the mappings.
-
- :param storage_elems: A list of the storage elements (LU
- ElementWrappers) that are to be deleted. Derived
- from the return value from detach_disk.
- """
- tsk_stg.rm_tier_storage(storage_elems, tier=self._tier)
-
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a boot disk and links the specified image to it.
-
- If the specified image has not already been uploaded, an Image LU is
- created for it. A Disk LU is then created for the instance and linked
- to the Image LU.
-
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- :return: The backing pypowervm LU storage object that was created.
- """
- LOG.info('SSP: Create boot disk from image %s.', image_meta.id,
- instance=instance)
-
- image_lu = tsk_cs.get_or_upload_image_lu(
- self._tier, pvm_u.sanitize_file_name_for_api(
- image_meta.name, prefix=disk_drv.DiskType.IMAGE + '_',
- suffix='_' + image_meta.checksum),
- random.choice(self._vios_uuids), disk_drv.IterableToFileAdapter(
- IMAGE_API.download(context, image_meta.id)), image_meta.size,
- upload_type=tsk_stg.UploadType.IO_STREAM)
-
- boot_lu_name = pvm_u.sanitize_file_name_for_api(
- instance.name, prefix=disk_drv.DiskType.BOOT + '_')
-
- LOG.info('SSP: Disk name is %s', boot_lu_name, instance=instance)
-
- return tsk_stg.crt_lu(
- self._tier, boot_lu_name, instance.flavor.root_gb,
- typ=pvm_stg.LUType.DISK, clone=image_lu)[1]
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Connects the disk image to the Virtual Machine.
-
- :param instance: nova instance to which to attach the disk.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- # Create the LU structure
- lu = pvm_stg.LU.bld_ref(self._adapter, disk_info.name, disk_info.udid)
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # This is the delay apply mapping
- def add_func(vios_w):
- LOG.info("Attaching SSP disk from VIOS %s.",
- vios_w.name, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- self._host_uuid, vios_w, lpar_uuid, lu)
- return tsk_map.add_map(vios_w, mapping)
-
- # Add the mapping to *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self._host_uuid, else the PowerVM API will
- # fail.
- #
- # Note: this may not be all the VIOSes on the system - just the ones
- # in the SSP cluster.
- for vios_uuid in self._vios_uuids:
- stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
-
- @property
- def _vios_uuids(self):
- """List the UUIDs of our cluster's VIOSes on this host.
-
- (If a VIOS is not on this host, we can't interact with it, even if its
- URI and therefore its UUID happen to be available in the pypowervm
- wrapper.)
-
- :return: A list of VIOS UUID strings.
- """
- ret = []
- for n in self._clust.nodes:
- # Skip any nodes that we don't have the VIOS uuid or uri
- if not (n.vios_uuid and n.vios_uri):
- continue
- if self._host_uuid == pvm_u.get_req_path_uuid(
- n.vios_uri, preserve_case=True, root=True):
- ret.append(n.vios_uuid)
- return ret
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- tsk_map.remove_lu_mapping(self._adapter, vios_uuid, self.mp_uuid,
- disk_names=[disk_name])
- LOG.info("Unmapped boot disk %(disk_name)s from the management "
- "partition from Virtual I/O Server %(vios_uuid)s.",
- {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
- 'vios_uuid': vios_uuid})
-
- @staticmethod
- def _disk_match_func(disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method.
- """
- disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance)
- return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name])
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
deleted file mode 100644
index 0f94a3b75b..0000000000
--- a/nova/virt/powervm/driver.py
+++ /dev/null
@@ -1,708 +0,0 @@
-# Copyright 2014, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Connection to PowerVM hypervisor through NovaLink."""
-
-import os_resource_classes as orc
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import importutils
-from pypowervm import adapter as pvm_apt
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as log_hlp
-from pypowervm.helpers import vios_busy as vio_hlp
-from pypowervm.tasks import partition as pvm_par
-from pypowervm.tasks import storage as pvm_stor
-from pypowervm.tasks import vterm as pvm_vterm
-from pypowervm.wrappers import managed_system as pvm_ms
-from taskflow.patterns import linear_flow as tf_lf
-
-from nova.compute import task_states
-from nova import conf as cfg
-from nova.console import type as console_type
-from nova import exception as exc
-from nova.i18n import _
-from nova.image import glance
-from nova.virt import configdrive
-from nova.virt import driver
-from nova.virt.powervm import host as pvm_host
-from nova.virt.powervm.tasks import base as tf_base
-from nova.virt.powervm.tasks import image as tf_img
-from nova.virt.powervm.tasks import network as tf_net
-from nova.virt.powervm.tasks import storage as tf_stg
-from nova.virt.powervm.tasks import vm as tf_vm
-from nova.virt.powervm import vm
-from nova.virt.powervm import volume
-from nova.virt.powervm.volume import fcvscsi
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-DISK_ADPT_NS = 'nova.virt.powervm.disk'
-DISK_ADPT_MAPPINGS = {
- 'localdisk': 'localdisk.LocalStorage',
- 'ssp': 'ssp.SSPDiskAdapter'
-}
-
-
-class PowerVMDriver(driver.ComputeDriver):
- """PowerVM NovaLink Implementation of Compute Driver.
-
- https://wiki.openstack.org/wiki/PowerVM
- """
-
- def __init__(self, virtapi):
- # NOTE(edmondsw) some of these will be dynamic in future, so putting
- # capabilities on the instance rather than on the class.
- self.capabilities = {
- 'has_imagecache': False,
- 'supports_bfv_rescue': False,
- 'supports_evacuate': False,
- 'supports_migrate_to_same_host': False,
- 'supports_attach_interface': True,
- 'supports_device_tagging': False,
- 'supports_tagged_attach_interface': False,
- 'supports_tagged_attach_volume': False,
- 'supports_extend_volume': True,
- 'supports_multiattach': False,
- 'supports_trusted_certs': False,
- 'supports_pcpus': False,
- 'supports_accelerators': False,
- 'supports_vtpm': False,
- 'supports_secure_boot': False,
- 'supports_socket_pci_numa_affinity': False,
-
- # Supported image types
- "supports_image_type_aki": False,
- "supports_image_type_ami": False,
- "supports_image_type_ari": False,
- "supports_image_type_iso": False,
- "supports_image_type_qcow2": False,
- "supports_image_type_raw": True,
- "supports_image_type_vdi": False,
- "supports_image_type_vhd": False,
- "supports_image_type_vhdx": False,
- "supports_image_type_vmdk": False,
- "supports_image_type_ploop": False,
- }
- super(PowerVMDriver, self).__init__(virtapi)
-
- def init_host(self, host):
- """Initialize anything that is necessary for the driver to function.
-
- Includes catching up with currently running VMs on the given host.
- """
- LOG.warning(
- 'The powervm virt driver is deprecated and may be removed in a '
- 'future release. The driver is not tested by the OpenStack '
- 'project nor does it have clear maintainers and thus its quality'
- 'can not be ensured. If you are using the driver in production '
- 'please let us know the openstack-discuss mailing list or on IRC'
- )
-
- # Build the adapter. May need to attempt the connection multiple times
- # in case the PowerVM management API service is starting.
- # TODO(efried): Implement async compute service enable/disable like
- # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
- self.adapter = pvm_apt.Adapter(
- pvm_apt.Session(conn_tries=60),
- helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
- # Make sure the Virtual I/O Server(s) are available.
- pvm_par.validate_vios_ready(self.adapter)
- self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
-
- # Do a scrub of the I/O plane to make sure the system is in good shape
- LOG.info("Clearing stale I/O connections on driver init.")
- pvm_stor.ComprehensiveScrub(self.adapter).execute()
-
- # Initialize the disk adapter
- self.disk_dvr = importutils.import_object_ns(
- DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
- self.adapter, self.host_wrapper.uuid)
- self.image_api = glance.API()
-
- LOG.info("The PowerVM compute driver has been initialized.")
-
- @staticmethod
- def _log_operation(op, instance):
- """Log entry point of driver operations."""
- LOG.info('Operation: %(op)s. Virtual machine display name: '
- '%(display_name)s, name: %(name)s',
- {'op': op, 'display_name': instance.display_name,
- 'name': instance.name}, instance=instance)
-
- def get_info(self, instance, use_cache=True):
- """Get the current status of an instance.
-
- :param instance: nova.objects.instance.Instance object
- :param use_cache: unused in this driver
- :returns: An InstanceInfo object.
- """
- return vm.get_vm_info(self.adapter, instance)
-
- def list_instances(self):
- """Return the names of all the instances known to the virt host.
-
- :return: VM Names as a list.
- """
- return vm.get_lpar_names(self.adapter)
-
- def get_available_nodes(self, refresh=False):
- """Returns nodenames of all nodes managed by the compute service.
-
- This method is for multi compute-nodes support. If a driver supports
- multi compute-nodes, this method returns a list of nodenames managed
- by the service. Otherwise, this method should return
- [hypervisor_hostname].
- """
-
- return [CONF.host]
-
- def get_available_resource(self, nodename):
- """Retrieve resource information.
-
- This method is called when nova-compute launches, and as part of a
- periodic task.
-
- :param nodename: Node from which the caller wants to get resources.
- A driver that manages only one node can safely ignore
- this.
- :return: Dictionary describing resources.
- """
- # Do this here so it refreshes each time this method is called.
- self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
- return self._get_available_resource()
-
- def _get_available_resource(self):
- # Get host information
- data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
-
- # Add the disk information
- data["local_gb"] = self.disk_dvr.capacity
- data["local_gb_used"] = self.disk_dvr.capacity_used
-
- return data
-
- def update_provider_tree(self, provider_tree, nodename, allocations=None):
- """Update a ProviderTree with current provider and inventory data.
-
- :param nova.compute.provider_tree.ProviderTree provider_tree:
- A nova.compute.provider_tree.ProviderTree object representing all
- the providers in the tree associated with the compute node, and any
- sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
- trait) associated via aggregate with any of those providers (but
- not *their* tree- or aggregate-associated providers), as currently
- known by placement.
- :param nodename:
- String name of the compute node (i.e.
- ComputeNode.hypervisor_hostname) for which the caller is requesting
- updated provider information.
- :param allocations: Currently ignored by this driver.
- """
- # Get (legacy) resource information. Same as get_available_resource,
- # but we don't need to refresh self.host_wrapper as it was *just*
- # refreshed by get_available_resource in the resource tracker's
- # update_available_resource flow.
- data = self._get_available_resource()
-
- # NOTE(yikun): If the inv record does not exist, the allocation_ratio
- # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
- # is set, and fall back to the initial_xxx_allocation_ratio otherwise.
- inv = provider_tree.data(nodename).inventory
- ratios = self._get_allocation_ratios(inv)
- # TODO(efried): Fix these to reflect something like reality
- cpu_reserved = CONF.reserved_host_cpus
- mem_reserved = CONF.reserved_host_memory_mb
- disk_reserved = self._get_reserved_host_disk_gb_from_config()
-
- inventory = {
- orc.VCPU: {
- 'total': data['vcpus'],
- 'max_unit': data['vcpus'],
- 'allocation_ratio': ratios[orc.VCPU],
- 'reserved': cpu_reserved,
- },
- orc.MEMORY_MB: {
- 'total': data['memory_mb'],
- 'max_unit': data['memory_mb'],
- 'allocation_ratio': ratios[orc.MEMORY_MB],
- 'reserved': mem_reserved,
- },
- orc.DISK_GB: {
- # TODO(efried): Proper DISK_GB sharing when SSP driver in play
- 'total': int(data['local_gb']),
- 'max_unit': int(data['local_gb']),
- 'allocation_ratio': ratios[orc.DISK_GB],
- 'reserved': disk_reserved,
- },
- }
- provider_tree.update_inventory(nodename, inventory)
-
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, allocations, network_info=None,
- block_device_info=None, power_on=True, accel_info=None):
- """Create a new instance/VM/domain on the virtualization platform.
-
- Once this successfully completes, the instance should be
- running (power_state.RUNNING).
-
- If this fails, any partial instance should be completely
- cleaned up, and the virtualization platform should be in the state
- that it was before this call began.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- This function should use the data there to guide
- the creation of the new instance.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- :param injected_files: User files to inject into instance.
- :param admin_password: Administrator password to set in instance.
- :param allocations: Information about resources allocated to the
- instance via placement, of the form returned by
- SchedulerReportClient.get_allocations_for_consumer.
- :param network_info: instance network information
- :param block_device_info: Information about block devices to be
- attached to the instance.
- :param power_on: True if the instance should be powered on, False
- otherwise
- """
- self._log_operation('spawn', instance)
- # Define the flow
- flow_spawn = tf_lf.Flow("spawn")
-
- # This FeedTask accumulates VIOS storage connection operations to be
- # run in parallel. Include both SCSI and fibre channel mappings for
- # the scrubber.
- stg_ftsk = pvm_par.build_active_vio_feed_task(
- self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
-
- flow_spawn.add(tf_vm.Create(
- self.adapter, self.host_wrapper, instance, stg_ftsk))
-
- # Create a flow for the IO
- flow_spawn.add(tf_net.PlugVifs(
- self.virtapi, self.adapter, instance, network_info))
- flow_spawn.add(tf_net.PlugMgmtVif(
- self.adapter, instance))
-
- # Create the boot image.
- flow_spawn.add(tf_stg.CreateDiskForImg(
- self.disk_dvr, context, instance, image_meta))
- # Connects up the disk to the LPAR
- flow_spawn.add(tf_stg.AttachDisk(
- self.disk_dvr, instance, stg_ftsk=stg_ftsk))
-
- # Extract the block devices.
- bdms = driver.block_device_info_get_mapping(block_device_info)
-
- # Determine if there are volumes to connect. If so, add a connection
- # for each type.
- for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
- stg_ftsk=stg_ftsk):
- # Connect the volume. This will update the connection_info.
- flow_spawn.add(tf_stg.AttachVolume(vol_drv))
-
- # If the config drive is needed, add those steps. Should be done
- # after all the other I/O.
- if configdrive.required_by(instance):
- flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
- self.adapter, instance, injected_files, network_info,
- stg_ftsk, admin_pass=admin_password))
-
- # Add the transaction manager flow at the end of the 'I/O
- # connection' tasks. This will run all the connections in parallel.
- flow_spawn.add(stg_ftsk)
-
- # Last step is to power on the system.
- flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))
-
- # Run the flow.
- tf_base.run(flow_spawn, instance=instance)
-
- def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
- """Destroy the specified instance from the Hypervisor.
-
- If the instance is not found (for example if networking failed), this
- function should still succeed. It's probably a good idea to log a
- warning in that case.
-
- :param context: security context
- :param instance: Instance object as returned by DB layer.
- :param network_info: instance network information
- :param block_device_info: Information about block devices that should
- be detached from the instance.
- :param destroy_disks: Indicates if disks should be destroyed
- """
- # TODO(thorst, efried) Add resize checks for destroy
-
- self._log_operation('destroy', instance)
-
- def _setup_flow_and_run():
- # Define the flow
- flow = tf_lf.Flow("destroy")
-
- # Power Off the LPAR. If its disks are about to be deleted, issue a
- # hard shutdown.
- flow.add(tf_vm.PowerOff(self.adapter, instance,
- force_immediate=destroy_disks))
-
- # The FeedTask accumulates storage disconnection tasks to be run in
- # parallel.
- stg_ftsk = pvm_par.build_active_vio_feed_task(
- self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
-
- # Call the unplug VIFs task. While CNAs get removed from the LPAR
- # directly on the destroy, this clears up the I/O Host side.
- flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
-
- # Add the disconnect/deletion of the vOpt to the transaction
- # manager.
- if configdrive.required_by(instance):
- flow.add(tf_stg.DeleteVOpt(
- self.adapter, instance, stg_ftsk=stg_ftsk))
-
- # Extract the block devices.
- bdms = driver.block_device_info_get_mapping(block_device_info)
-
- # Determine if there are volumes to detach. If so, remove each
- # volume (within the transaction manager)
- for bdm, vol_drv in self._vol_drv_iter(
- context, instance, bdms, stg_ftsk=stg_ftsk):
- flow.add(tf_stg.DetachVolume(vol_drv))
-
- # Detach the disk storage adapters
- flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
-
- # Accumulated storage disconnection tasks next
- flow.add(stg_ftsk)
-
- # Delete the storage disks
- if destroy_disks:
- flow.add(tf_stg.DeleteDisk(self.disk_dvr))
-
- # TODO(thorst, efried) Add LPAR id based scsi map clean up task
- flow.add(tf_vm.Delete(self.adapter, instance))
-
- # Build the engine & run!
- tf_base.run(flow, instance=instance)
-
- try:
- _setup_flow_and_run()
- except exc.InstanceNotFound:
- LOG.debug('VM was not found during destroy operation.',
- instance=instance)
- return
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during destroy.", instance=instance)
- # Convert to a Nova exception
- raise exc.InstanceTerminationFailure(reason=str(e))
-
- def snapshot(self, context, instance, image_id, update_task_state):
- """Snapshots the specified instance.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- :param image_id: Reference to a pre-created image that will hold the
- snapshot.
- :param update_task_state: Callback function to update the task_state
- on the instance while the snapshot operation progresses. The
- function takes a task_state argument and an optional
- expected_task_state kwarg which defaults to
- nova.compute.task_states.IMAGE_SNAPSHOT. See
- nova.objects.instance.Instance.save for expected_task_state usage.
- """
-
- if not self.disk_dvr.capabilities.get('snapshot'):
- raise exc.NotSupportedWithOption(
- message=_("The snapshot operation is not supported in "
- "conjunction with a [powervm]/disk_driver setting "
- "of %s.") % CONF.powervm.disk_driver)
-
- self._log_operation('snapshot', instance)
-
- # Define the flow.
- flow = tf_lf.Flow("snapshot")
-
- # Notify that we're starting the process.
- flow.add(tf_img.UpdateTaskState(update_task_state,
- task_states.IMAGE_PENDING_UPLOAD))
-
- # Connect the instance's boot disk to the management partition, and
- # scan the scsi bus and bring the device into the management partition.
- flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
-
- # Notify that the upload is in progress.
- flow.add(tf_img.UpdateTaskState(
- update_task_state, task_states.IMAGE_UPLOADING,
- expected_state=task_states.IMAGE_PENDING_UPLOAD))
-
- # Stream the disk to glance.
- flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
- instance))
-
- # Disconnect the boot disk from the management partition and delete the
- # device.
- flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
-
- # Run the flow.
- tf_base.run(flow, instance=instance)
-
- def power_off(self, instance, timeout=0, retry_interval=0):
- """Power off the specified instance.
-
- :param instance: nova.objects.instance.Instance
- :param timeout: time to wait for GuestOS to shutdown
- :param retry_interval: How often to signal guest while
- waiting for it to shutdown
- """
- self._log_operation('power_off', instance)
- force_immediate = (timeout == 0)
- timeout = timeout or None
- vm.power_off(self.adapter, instance, force_immediate=force_immediate,
- timeout=timeout)
-
- def power_on(self, context, instance, network_info,
- block_device_info=None, accel_info=None):
- """Power on the specified instance.
-
- :param instance: nova.objects.instance.Instance
- """
- self._log_operation('power_on', instance)
- vm.power_on(self.adapter, instance)
-
- def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None, bad_volumes_callback=None,
- accel_info=None):
- """Reboot the specified instance.
-
- After this is called successfully, the instance's state
- goes back to power_state.RUNNING. The virtualization
- platform should ensure that the reboot action has completed
- successfully even in cases in which the underlying domain/vm
- is paused or halted/stopped.
-
- :param instance: nova.objects.instance.Instance
- :param network_info: `nova.network.models.NetworkInfo` object
- describing the network metadata.
- :param reboot_type: Either a HARD or SOFT reboot
- :param block_device_info: Info pertaining to attached volumes
- :param bad_volumes_callback: Function to handle any bad volumes
- encountered
- :param accel_info: List of accelerator request dicts. The exact
- data struct is doc'd in nova/virt/driver.py::spawn().
- """
- self._log_operation(reboot_type + ' reboot', instance)
- vm.reboot(self.adapter, instance, reboot_type == 'HARD')
- # pypowervm exceptions are sufficient to indicate real failure.
- # Otherwise, pypowervm thinks the instance is up.
-
- def attach_interface(self, context, instance, image_meta, vif):
- """Attach an interface to the instance."""
- self.plug_vifs(instance, [vif])
-
- def detach_interface(self, context, instance, vif):
- """Detach an interface from the instance."""
- self.unplug_vifs(instance, [vif])
-
- def plug_vifs(self, instance, network_info):
- """Plug VIFs into networks."""
- self._log_operation('plug_vifs', instance)
-
- # Define the flow
- flow = tf_lf.Flow("plug_vifs")
-
- # Get the LPAR Wrapper
- flow.add(tf_vm.Get(self.adapter, instance))
-
- # Run the attach
- flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
- network_info))
-
- # Run the flow
- try:
- tf_base.run(flow, instance=instance)
- except exc.InstanceNotFound:
- raise exc.VirtualInterfacePlugException(
- _("Plug vif failed because instance %s was not found.")
- % instance.name)
- except Exception:
- LOG.exception("PowerVM error plugging vifs.", instance=instance)
- raise exc.VirtualInterfacePlugException(
- _("Plug vif failed because of an unexpected error."))
-
- def unplug_vifs(self, instance, network_info):
- """Unplug VIFs from networks."""
- self._log_operation('unplug_vifs', instance)
-
- # Define the flow
- flow = tf_lf.Flow("unplug_vifs")
-
- # Run the detach
- flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
-
- # Run the flow
- try:
- tf_base.run(flow, instance=instance)
- except exc.InstanceNotFound:
- LOG.warning('VM was not found during unplug operation as it may '
- 'already have been deleted.', instance=instance)
- except Exception:
- LOG.exception("PowerVM error trying to unplug vifs.",
- instance=instance)
- raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
-
- def get_vnc_console(self, context, instance):
- """Get connection info for a vnc console.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
-
- :return: An instance of console.type.ConsoleVNC
- """
- self._log_operation('get_vnc_console', instance)
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # Build the connection to the VNC.
- host = CONF.vnc.server_proxyclient_address
- # TODO(thorst, efried) Add the x509 certificate support when it lands
-
- try:
- # Open up a remote vterm
- port = pvm_vterm.open_remotable_vnc_vterm(
- self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
- # Note that the VNC viewer will wrap the internal_access_path with
- # the HTTP content.
- return console_type.ConsoleVNC(host=host, port=port,
- internal_access_path=lpar_uuid)
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- # If the LPAR was not found, raise a more descriptive error
- if e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=instance.uuid)
-
- def attach_volume(self, context, connection_info, instance, mountpoint,
- disk_bus=None, device_type=None, encryption=None):
- """Attach the volume to the instance using the connection_info.
-
- :param context: security context
- :param connection_info: Volume connection information from the block
- device mapping
- :param instance: nova.objects.instance.Instance
- :param mountpoint: Unused
- :param disk_bus: Unused
- :param device_type: Unused
- :param encryption: Unused
- """
- self._log_operation('attach_volume', instance)
-
- # Define the flow
- flow = tf_lf.Flow("attach_volume")
-
- # Build the driver
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- connection_info)
-
- # Add the volume attach to the flow.
- flow.add(tf_stg.AttachVolume(vol_drv))
-
- # Run the flow
- tf_base.run(flow, instance=instance)
-
- # The volume connector may have updated the system metadata. Save
- # the instance to persist the data. Spawn/destroy auto saves instance,
- # but the attach does not. Detach does not need this save - as the
- # detach flows do not (currently) modify system metadata. May need
- # to revise in the future as volume connectors evolve.
- instance.save()
-
- def detach_volume(self, context, connection_info, instance, mountpoint,
- encryption=None):
- """Detach the volume attached to the instance.
-
- :param context: security context
- :param connection_info: Volume connection information from the block
- device mapping
- :param instance: nova.objects.instance.Instance
- :param mountpoint: Unused
- :param encryption: Unused
- """
- self._log_operation('detach_volume', instance)
-
- # Define the flow
- flow = tf_lf.Flow("detach_volume")
-
- # Get a volume adapter for this volume
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- connection_info)
-
- # Add a task to detach the volume
- flow.add(tf_stg.DetachVolume(vol_drv))
-
- # Run the flow
- tf_base.run(flow, instance=instance)
-
- def extend_volume(self, context, connection_info, instance,
- requested_size):
- """Extend the disk attached to the instance.
-
- :param context: security context
- :param dict connection_info: The connection for the extended volume.
- :param nova.objects.instance.Instance instance:
- The instance whose volume gets extended.
- :param int requested_size: The requested new volume size in bytes.
- :return: None
- """
-
- vol_drv = volume.build_volume_driver(
- self.adapter, instance, connection_info)
- vol_drv.extend_volume()
-
- def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
- """Yields a bdm and volume driver.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- :param bdms: block device mappings
- :param stg_ftsk: storage FeedTask
- """
- # Get a volume driver for each volume
- for bdm in bdms or []:
- conn_info = bdm.get('connection_info')
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- conn_info, stg_ftsk=stg_ftsk)
- yield bdm, vol_drv
-
- def get_volume_connector(self, instance):
- """Get connector information for the instance for attaching to volumes.
-
- Connector information is a dictionary representing information about
- the system that will be making the connection.
-
- :param instance: nova.objects.instance.Instance
- """
- # Put the values in the connector
- connector = {}
- wwpn_list = fcvscsi.wwpns(self.adapter)
-
- if wwpn_list is not None:
- connector["wwpns"] = wwpn_list
- connector["multipath"] = False
- connector['host'] = CONF.host
- connector['initiator'] = None
-
- return connector
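The spawn and destroy methods above are built on a single TaskFlow pattern: each step is a Task that may 'provide' a named result and 'require' results from earlier tasks, and a failure part-way through makes the engine revert the tasks that already completed. A minimal, self-contained sketch of that pattern, with illustrative task names and values rather than the driver's own:

    from taskflow import engines, task
    from taskflow.patterns import linear_flow

    class CreateDisk(task.Task):
        default_provides = 'disk_dev_info'

        def execute(self):
            # Stand-in for disk creation; the real task calls a disk driver.
            return {'path': '/dev/fake'}

    class AttachDisk(task.Task):
        def execute(self, disk_dev_info):
            # Receives the result 'provided' by CreateDisk.
            print('attaching %s' % disk_dev_info['path'])

        def revert(self, disk_dev_info, result, flow_failures):
            # Called automatically if a later task in the flow fails.
            print('detaching %s' % disk_dev_info['path'])

    flow = linear_flow.Flow('spawn')
    flow.add(CreateDisk(), AttachDisk())
    engines.run(flow)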
diff --git a/nova/virt/powervm/host.py b/nova/virt/powervm/host.py
deleted file mode 100644
index 2b206fee41..0000000000
--- a/nova/virt/powervm/host.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import math
-
-from oslo_serialization import jsonutils
-
-from nova import conf as cfg
-from nova.objects import fields
-
-
-CONF = cfg.CONF
-
-# Power VM hypervisor info
-# Normally, the hypervisor version is a string in the form of '8.0.0',
-# converted to an int with nova.virt.utils.convert_version_to_int().
-# However, there isn't currently a mechanism to retrieve the exact version.
-# Complicating this is the fact that nova conductor only allows live migration
-# from the source host to the destination if the source is equal to or less
-# than the destination version. PowerVM live migration limitations are
-# checked by the PowerVM capabilities flags and not specific version levels.
-# For that reason, we'll just publish the major level.
-IBM_POWERVM_HYPERVISOR_VERSION = 8
-
-# The types of LPARS that are supported.
-POWERVM_SUPPORTED_INSTANCES = [
- (fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM),
- (fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM)]
-
-
-def build_host_resource_from_ms(ms_w):
- """Build the host resource dict from a ManagedSystem PowerVM wrapper.
-
- :param ms_w: The pypowervm System wrapper describing the managed system.
- """
- data = {}
- # Calculate the vcpus
- proc_units = ms_w.proc_units_configurable
- pu_used = float(proc_units) - float(ms_w.proc_units_avail)
- data['vcpus'] = int(math.ceil(float(proc_units)))
- data['vcpus_used'] = int(math.ceil(pu_used))
- data['memory_mb'] = ms_w.memory_configurable
- data['memory_mb_used'] = (ms_w.memory_configurable -
- ms_w.memory_free)
- data["hypervisor_type"] = fields.HVType.PHYP
- data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
- data["hypervisor_hostname"] = CONF.host
- data["cpu_info"] = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'})
- data["numa_topology"] = None
- data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
- stats = {'proc_units': '%.2f' % float(proc_units),
- 'proc_units_used': '%.2f' % pu_used,
- 'memory_region_size': ms_w.memory_region_size}
- data["stats"] = stats
- return data
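As a worked example of the rounding in build_host_resource_from_ms (the values are illustrative): with 15.5 configurable processor units and 3.25 available, the host reports ceil(15.5) == 16 vcpus and ceil(15.5 - 3.25) == ceil(12.25) == 13 vcpus used, while the stats carry the exact two-decimal proc unit values:

    import math

    proc_units = 15.5
    pu_avail = 3.25
    pu_used = proc_units - pu_avail
    assert int(math.ceil(proc_units)) == 16
    assert int(math.ceil(pu_used)) == 13
    assert '%.2f' % pu_used == '12.25'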
diff --git a/nova/virt/powervm/image.py b/nova/virt/powervm/image.py
deleted file mode 100644
index b4636b0f11..0000000000
--- a/nova/virt/powervm/image.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities related to glance image management for the PowerVM driver."""
-
-from nova import utils
-
-
-def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
- """Stream the entire contents of a block device to a glance image.
-
- :param context: Nova security context.
- :param image_api: Handle to the glance image API.
- :param image_id: UUID of the prepared glance image.
- :param metadata: Dictionary of metadata for the image.
- :param devpath: String path to device file of block device to be uploaded,
- e.g. "/dev/sde".
- """
- # Make the device file owned by the current user for the duration of the
- # operation.
- with utils.temporary_chown(devpath), open(devpath, 'rb') as stream:
- # Stream it. This is synchronous.
- image_api.update(context, image_id, metadata, stream)
-
-
-def generate_snapshot_metadata(context, image_api, image_id, instance):
- """Generate a metadata dictionary for an instance snapshot.
-
- :param context: Nova security context.
- :param image_api: Handle to the glance image API.
- :param image_id: UUID of the prepared glance image.
- :param instance: The Nova instance whose disk is to be snapshotted.
- :return: A dict of metadata suitable for image_api.update.
- """
- image = image_api.get(context, image_id)
-
- # TODO(esberglu): Update this to v2 metadata
- metadata = {
- 'name': image['name'],
- 'status': 'active',
- 'disk_format': 'raw',
- 'container_format': 'bare',
- 'properties': {
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': instance.project_id,
- }
- }
- return metadata
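The upload helper above relies on nova.utils.temporary_chown, a context manager that hands the device file to the current user for the duration of the 'with' block and then restores the original owner. A hedged sketch of that shape using contextlib; nova's real implementation performs the chown via privsep:

    import contextlib
    import os

    @contextlib.contextmanager
    def temporary_chown_sketch(path, uid=None):
        uid = os.getuid() if uid is None else uid
        orig_uid = os.stat(path).st_uid
        # Changing ownership of a device file requires privileges.
        os.chown(path, uid, -1)
        try:
            yield
        finally:
            os.chown(path, orig_uid, -1)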
diff --git a/nova/virt/powervm/media.py b/nova/virt/powervm/media.py
deleted file mode 100644
index f57ddd332d..0000000000
--- a/nova/virt/powervm/media.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import os
-import tempfile
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.tasks import vopt as tsk_vopt
-from pypowervm import util as pvm_util
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-import retrying
-from taskflow import task
-
-from nova.api.metadata import base as instance_metadata
-from nova.network import model as network_model
-from nova.virt import configdrive
-from nova.virt.powervm import vm
-
-
-LOG = logging.getLogger(__name__)
-
-_LLA_SUBNET = "fe80::/64"
-# TODO(efried): CONF these (maybe)
-_VOPT_VG = 'rootvg'
-_VOPT_SIZE_GB = 1
-
-
-class ConfigDrivePowerVM(object):
-
- def __init__(self, adapter):
- """Creates the config drive manager for PowerVM.
-
- :param adapter: The pypowervm adapter to communicate with the system.
- """
- self.adapter = adapter
-
- # Validate that the virtual optical exists
- self.vios_uuid, self.vg_uuid = tsk_vopt.validate_vopt_repo_exists(
- self.adapter, vopt_media_volume_group=_VOPT_VG,
- vopt_media_rep_size=_VOPT_SIZE_GB)
-
- @staticmethod
- def _sanitize_network_info(network_info):
- """Will sanitize the network info for the config drive.
-
- Newer versions of cloud-init look at the vif type information in
- the network info and utilize it to determine what to do. There are
- a limited number of vif types, and it seems to be built on the idea
- that the neutron vif type is the cloud-init vif type (which is not
- quite right).
-
- This sanitizes the network info that gets passed into the config
- drive so that it works properly with cloud-init.
- """
- network_info = copy.deepcopy(network_info)
-
- # OVS is the only supported vif type. All others (SEA, PowerVM SR-IOV)
- # will default to generic vif.
- for vif in network_info:
- if vif.get('type') != 'ovs':
- LOG.debug('Changing vif type from %(type)s to vif for vif '
- '%(id)s.', {'type': vif.get('type'),
- 'id': vif.get('id')})
- vif['type'] = 'vif'
- return network_info
-
- def _create_cfg_dr_iso(self, instance, injected_files, network_info,
- iso_path, admin_pass=None):
- """Creates an ISO file that contains the injected files.
-
- Used for config drive.
-
- :param instance: The VM instance from OpenStack.
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param iso_path: The absolute file path for the new ISO
- :param admin_pass: Optional password to inject for the VM.
- """
- LOG.info("Creating config drive.", instance=instance)
- extra_md = {}
- if admin_pass is not None:
- extra_md['admin_pass'] = admin_pass
-
- # Sanitize the vifs for the network config
- network_info = self._sanitize_network_info(network_info)
-
- inst_md = instance_metadata.InstanceMetadata(instance,
- content=injected_files,
- extra_md=extra_md,
- network_info=network_info)
-
- with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
- LOG.info("Config drive ISO being built in %s.", iso_path,
- instance=instance)
-
- # There may be an OSError exception when creating the config drive.
- # If so, retry the operation before raising.
- @retrying.retry(retry_on_exception=lambda exc: isinstance(
- exc, OSError), stop_max_attempt_number=2)
- def _make_cfg_drive(iso_path):
- cdb.make_drive(iso_path)
-
- try:
- _make_cfg_drive(iso_path)
- except OSError:
- with excutils.save_and_reraise_exception(logger=LOG):
- LOG.exception("Config drive ISO could not be built",
- instance=instance)
-
- def create_cfg_drv_vopt(self, instance, injected_files, network_info,
- stg_ftsk, admin_pass=None, mgmt_cna=None):
- """Create the config drive virtual optical and attach to VM.
-
- :param instance: The VM instance from OpenStack.
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- :param admin_pass: (Optional) password to inject for the VM.
- :param mgmt_cna: (Optional) The management (RMC) CNA wrapper.
- """
- # If there is a management client network adapter, then we should
- # convert that to a VIF and add it to the network info
- if mgmt_cna is not None:
- network_info = copy.deepcopy(network_info)
- network_info.append(self._mgmt_cna_to_vif(mgmt_cna))
-
- # Pick a file name for when we upload the media to VIOS
- file_name = pvm_util.sanitize_file_name_for_api(
- instance.uuid.replace('-', ''), prefix='cfg_', suffix='.iso',
- max_len=pvm_const.MaxLen.VOPT_NAME)
-
- # Create and upload the media
- with tempfile.NamedTemporaryFile(mode='rb') as fh:
- self._create_cfg_dr_iso(instance, injected_files, network_info,
- fh.name, admin_pass=admin_pass)
- vopt, f_uuid = tsk_stg.upload_vopt(
- self.adapter, self.vios_uuid, fh, file_name,
- os.path.getsize(fh.name))
-
- # Define the function to build and add the mapping
- def add_func(vios_w):
- LOG.info("Adding cfg drive mapping to Virtual I/O Server %s.",
- vios_w.name, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- None, vios_w, vm.get_pvm_uuid(instance), vopt)
- return tsk_map.add_map(vios_w, mapping)
-
- # Add the subtask to create the mapping when the FeedTask runs
- stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(add_func)
-
- def _mgmt_cna_to_vif(self, cna):
- """Converts the mgmt CNA to VIF format for network injection."""
- mac = vm.norm_mac(cna.mac)
- ipv6_link_local = self._mac_to_link_local(mac)
-
- subnet = network_model.Subnet(
- version=6, cidr=_LLA_SUBNET,
- ips=[network_model.FixedIP(address=ipv6_link_local)])
- network = network_model.Network(id='mgmt', subnets=[subnet],
- injected='yes')
- return network_model.VIF(id='mgmt_vif', address=mac,
- network=network)
-
- @staticmethod
- def _mac_to_link_local(mac):
- # Convert the address to IPv6. The first step is to separate out the
- # mac address
- splits = mac.split(':')
-
- # Create EUI-64 id per RFC 4291 Appendix A
- splits.insert(3, 'ff')
- splits.insert(4, 'fe')
-
- # Create modified EUI-64 id via bit flip per RFC 4291 Appendix A
- splits[0] = "%.2x" % (int(splits[0], 16) ^ 0b00000010)
-
- # Convert to the IPv6 link local format. The prefix is fe80::. Join
- # the hexes together at every other digit.
- ll = ['fe80:']
- ll.extend([splits[x] + splits[x + 1]
- for x in range(0, len(splits), 2)])
- return ':'.join(ll)
-
- def dlt_vopt(self, instance, stg_ftsk):
- """Deletes the virtual optical and scsi mappings for a VM.
-
- :param instance: The nova instance whose VOpt(s) are to be removed.
- :param stg_ftsk: A FeedTask. The actions to modify the storage will be
- added as batched functions onto the FeedTask.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # The matching function for find_maps, remove_maps
- match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)
-
- # Add a function to remove the mappings
- stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
- tsk_map.remove_maps, lpar_uuid, match_func=match_func)
-
- # Find the VOpt device from the mappings
- media_mappings = tsk_map.find_maps(
- stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
- client_lpar_id=lpar_uuid, match_func=match_func)
- media_elems = [x.backing_storage for x in media_mappings]
-
- def rm_vopt():
- LOG.info("Removing virtual optical storage.",
- instance=instance)
- vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
- parent_type=pvm_vios.VIOS,
- parent_uuid=self.vios_uuid)
- tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)
-
- # Add task to remove the media if it exists
- if media_elems:
- stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
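A worked example of the _mac_to_link_local conversion above, runnable on its own (the sample MAC address is illustrative):

    mac = 'fa:16:3e:12:34:56'
    splits = mac.split(':')
    # EUI-64: insert ff:fe in the middle (RFC 4291 Appendix A).
    splits.insert(3, 'ff')
    splits.insert(4, 'fe')
    # Modified EUI-64: flip the universal/local bit of the first octet.
    splits[0] = '%.2x' % (int(splits[0], 16) ^ 0b00000010)
    ll = ['fe80:'] + [splits[x] + splits[x + 1] for x in range(0, 8, 2)]
    assert ':'.join(ll) == 'fe80::f816:3eff:fe12:3456'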
diff --git a/nova/virt/powervm/mgmt.py b/nova/virt/powervm/mgmt.py
deleted file mode 100644
index a62fa29bde..0000000000
--- a/nova/virt/powervm/mgmt.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities related to the PowerVM management partition.
-
-The management partition is a special LPAR that runs the PowerVM REST API
-service. It itself appears through the REST API as a LogicalPartition of type
-aixlinux, but with the is_mgmt_partition property set to True.
-The PowerVM Nova Compute service runs on the management partition.
-"""
-import glob
-import os
-from os import path
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from pypowervm.tasks import partition as pvm_par
-import retrying
-
-from nova import exception
-import nova.privsep.path
-
-
-LOG = logging.getLogger(__name__)
-
-_MP_UUID = None
-
-
-@lockutils.synchronized("mgmt_lpar_uuid")
-def mgmt_uuid(adapter):
- """Returns the management partitions UUID."""
- global _MP_UUID
- if not _MP_UUID:
- _MP_UUID = pvm_par.get_this_partition(adapter).uuid
- return _MP_UUID
-
-
-def discover_vscsi_disk(mapping, scan_timeout=300):
- """Bring a mapped device into the management partition and find its name.
-
- Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
- causing the operating system to discover the mapped device. Find and
- return the path of the newly-discovered device based on its UDID in the
- mapping.
-
- Note: scanning the bus will cause the operating system to discover *all*
- devices on that bus. However, this method will only return the path for
- the specific device from the input mapping, based on its UDID.
-
- :param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
- representing the mapping of the desired disk to the
- management partition.
- :param scan_timeout: The maximum number of seconds after scanning to wait
- for the specified device to appear.
- :return: The udev-generated ("/dev/sdX") name of the discovered disk.
- :raise NoDiskDiscoveryException: If the disk did not appear after the
- specified timeout.
- :raise UniqueDiskDiscoveryException: If more than one disk appears with the
- expected UDID.
- """
- # Calculate the Linux slot number from the client adapter slot number.
- lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
- # We'll match the device ID based on the UDID, which is actually the last
- # 32 chars of the field we get from PowerVM.
- udid = mapping.backing_storage.udid[-32:]
-
- LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
- "%(slot)x.", {'udid': udid, 'slot': lslot})
-
- # Find the special file to scan the bus, and scan it.
- # This glob should yield exactly one result, but use the loop just in case.
- for scanpath in glob.glob(
- '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
- # Writing '- - -' to this sysfs file triggers bus rescan
- nova.privsep.path.writefile(scanpath, 'a', '- - -')
-
- # Now see if our device showed up. If so, we can reliably match it based
- # on its Linux ID, which ends with the disk's UDID.
- dpathpat = '/dev/disk/by-id/*%s' % udid
-
- # The bus scan is asynchronous. Need to poll, waiting for the device to
- # spring into existence. Stop when glob finds at least one device, or
- # after the specified timeout. Sleep 1/4 second between polls.
- @retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
- stop_max_delay=scan_timeout * 1000)
- def _poll_for_dev(globpat):
- return glob.glob(globpat)
- try:
- disks = _poll_for_dev(dpathpat)
- except retrying.RetryError as re:
- raise exception.NoDiskDiscoveryException(
- bus=lslot, udid=udid, polls=re.last_attempt.attempt_number,
- timeout=scan_timeout)
- # If we get here, _poll_for_dev returned a nonempty list. If not exactly
- # one entry, this is an error.
- if len(disks) != 1:
- raise exception.UniqueDiskDiscoveryException(path_pattern=dpathpat,
- count=len(disks))
-
- # The by-id path is a symlink. Resolve to the /dev/sdX path
- dpath = path.realpath(disks[0])
- LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at "
- "path %(devname)s.",
- {'udid': udid, 'slot': lslot, 'devname': dpath})
- return dpath
-
-
-def remove_block_dev(devpath, scan_timeout=10):
- """Remove a block device from the management partition.
-
- This method causes the operating system of the management partition to
- delete the device special files associated with the specified block device.
-
- :param devpath: Any path to the block special file associated with the
- device to be removed.
- :param scan_timeout: The maximum number of seconds after scanning to wait
- for the specified device to disappear.
- :raise InvalidDevicePath: If the specified device or its 'delete' special
- file cannot be found.
- :raise DeviceDeletionException: If the deletion was attempted, but the
- device special file is still present
- afterward.
- """
- # Resolve symlinks, if any, to get to the /dev/sdX path
- devpath = path.realpath(devpath)
- try:
- os.stat(devpath)
- except OSError:
- raise exception.InvalidDevicePath(path=devpath)
- devname = devpath.rsplit('/', 1)[-1]
- delpath = '/sys/block/%s/device/delete' % devname
- try:
- os.stat(delpath)
- except OSError:
- raise exception.InvalidDevicePath(path=delpath)
- LOG.debug("Deleting block device %(devpath)s from the management "
- "partition via special file %(delpath)s.",
- {'devpath': devpath, 'delpath': delpath})
- # Writing '1' to this sysfs file deletes the block device and rescans.
- nova.privsep.path.writefile(delpath, 'a', '1')
-
- # The bus scan is asynchronous. Need to poll, waiting for the device to
- # disappear. Stop when stat raises OSError (dev file not found) - which is
- # success - or after the specified timeout (which is failure). Sleep 1/4
- # second between polls.
- @retrying.retry(retry_on_result=lambda result: result, wait_fixed=250,
- stop_max_delay=scan_timeout * 1000)
- def _poll_for_del(statpath):
- try:
- os.stat(statpath)
- return True
- except OSError:
- # Device special file is absent, as expected
- return False
- try:
- _poll_for_del(devpath)
- except retrying.RetryError as re:
- # stat just kept returning (dev file continued to exist).
- raise exception.DeviceDeletionException(
- devpath=devpath, polls=re.last_attempt.attempt_number,
- timeout=scan_timeout)
- # Else stat raised - the device disappeared - all done.
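Both discover_vscsi_disk and remove_block_dev poll with the same idiom from the retrying library: retry while the predicate holds, every 250 ms, up to a deadline. A minimal standalone version (the glob pattern here is illustrative):

    import glob

    import retrying

    @retrying.retry(retry_on_result=lambda result: not result,
                    wait_fixed=250, stop_max_delay=10 * 1000)
    def _poll_for_dev(globpat):
        return glob.glob(globpat)

    # Raises retrying.RetryError if nothing matches within 10 seconds.
    disks = _poll_for_dev('/dev/disk/by-id/*')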
diff --git a/nova/virt/powervm/tasks/__init__.py b/nova/virt/powervm/tasks/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/virt/powervm/tasks/__init__.py
+++ /dev/null
diff --git a/nova/virt/powervm/tasks/base.py b/nova/virt/powervm/tasks/base.py
deleted file mode 100644
index 07714d5b8f..0000000000
--- a/nova/virt/powervm/tasks/base.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2016, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_log import log as logging
-from taskflow import engines as tf_eng
-from taskflow.listeners import timing as tf_tm
-
-
-LOG = logging.getLogger(__name__)
-
-
-def run(flow, instance=None):
- """Run a TaskFlow Flow with task timing and logging with instance.
-
- :param flow: A taskflow.flow.Flow to run.
- :param instance: A nova instance, for logging.
- :return: The result of taskflow.engines.run(), a dictionary of named
- results of the Flow's execution.
- """
- def log_with_instance(*args, **kwargs):
- """Wrapper for LOG.info(*args, **kwargs, instance=instance)."""
- if instance is not None:
- kwargs['instance'] = instance
- LOG.info(*args, **kwargs)
-
- eng = tf_eng.load(flow)
- with tf_tm.PrintingDurationListener(eng, printer=log_with_instance):
- return eng.run()
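A hypothetical use of the run() helper above, timing a one-task flow; the task wraps a no-op callable in taskflow's FunctorTask:

    from taskflow import task as tf_task
    from taskflow.patterns import linear_flow as tf_lf

    def noop():
        pass

    flow = tf_lf.Flow('example')
    flow.add(tf_task.FunctorTask(noop, name='noop'))
    run(flow)  # the listener logs each task's duration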
diff --git a/nova/virt/powervm/tasks/image.py b/nova/virt/powervm/tasks/image.py
deleted file mode 100644
index 4f8fe4ba18..0000000000
--- a/nova/virt/powervm/tasks/image.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from taskflow import task
-
-from nova.virt.powervm import image
-
-
-LOG = logging.getLogger(__name__)
-
-
-class UpdateTaskState(task.Task):
-
- def __init__(self, update_task_state, task_state, expected_state=None):
- """Invoke the update_task_state callback with the desired arguments.
-
- :param update_task_state: update_task_state callable passed into
- snapshot.
- :param task_state: The new task state (from nova.compute.task_states)
- to set.
- :param expected_state: Optional. The expected state of the task prior
- to this request.
- """
- self.update_task_state = update_task_state
- self.task_state = task_state
- self.kwargs = {}
- if expected_state is not None:
- # We only want to pass expected state if it's not None! That's so
- # we take the update_task_state method's default.
- self.kwargs['expected_state'] = expected_state
- super(UpdateTaskState, self).__init__(
- name='update_task_state_%s' % task_state)
-
- def execute(self):
- self.update_task_state(self.task_state, **self.kwargs)
-
-
-class StreamToGlance(task.Task):
-
- """Task around streaming a block device to glance."""
-
- def __init__(self, context, image_api, image_id, instance):
- """Initialize the flow for streaming a block device to glance.
-
- Requires: disk_path: Path to the block device file for the instance's
- boot disk.
- :param context: Nova security context.
- :param image_api: Handle to the glance API.
- :param image_id: UUID of the prepared glance image.
- :param instance: Instance whose backing device is being captured.
- """
- self.context = context
- self.image_api = image_api
- self.image_id = image_id
- self.instance = instance
- super(StreamToGlance, self).__init__(name='stream_to_glance',
- requires='disk_path')
-
- def execute(self, disk_path):
- metadata = image.generate_snapshot_metadata(
- self.context, self.image_api, self.image_id, self.instance)
- LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
- "to glance image %(img_id)s.",
- {'devpath': disk_path, 'img_id': self.image_id},
- instance=self.instance)
- image.stream_blockdev_to_glance(self.context, self.image_api,
- self.image_id, metadata, disk_path)
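A hedged sketch of the update_task_state contract these tasks depend on; the callback below is a stand-in for the one nova-compute passes into snapshot():

    from nova.compute import task_states

    def update_task_state(task_state,
                          expected_state=task_states.IMAGE_SNAPSHOT):
        print('task_state -> %s (expected %s)'
              % (task_state, expected_state))

    UpdateTaskState(update_task_state,
                    task_states.IMAGE_PENDING_UPLOAD).execute()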
diff --git a/nova/virt/powervm/tasks/network.py b/nova/virt/powervm/tasks/network.py
deleted file mode 100644
index d96ff25d9d..0000000000
--- a/nova/virt/powervm/tasks/network.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import eventlet
-from oslo_log import log as logging
-from pypowervm.tasks import cna as pvm_cna
-from pypowervm.wrappers import managed_system as pvm_ms
-from pypowervm.wrappers import network as pvm_net
-from taskflow import task
-
-from nova import conf as cfg
-from nova import exception
-from nova.virt.powervm import vif
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-SECURE_RMC_VSWITCH = 'MGMTSWITCH'
-SECURE_RMC_VLAN = 4094
-
-
-class PlugVifs(task.Task):
-
- """The task to plug the Virtual Network Interfaces to a VM."""
-
- def __init__(self, virt_api, adapter, instance, network_infos):
- """Create the task.
-
- Provides 'vm_cnas' - the list of the Virtual Machine's Client Network
- Adapters as they stand after all VIFs are plugged. May be None, in
- which case the Task requiring 'vm_cnas' should discover them afresh.
-
- :param virt_api: The VirtAPI for the operation.
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param network_infos: The network information containing the nova
- VIFs to create.
- """
- self.virt_api = virt_api
- self.adapter = adapter
- self.instance = instance
- self.network_infos = network_infos or []
- self.crt_network_infos, self.update_network_infos = [], []
- # Cache of CNAs that is filled on initial _vif_exists() call.
- self.cnas = None
-
- super(PlugVifs, self).__init__(
- name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
-
- def _vif_exists(self, network_info):
- """Does the instance have a CNA for a given net?
-
- :param network_info: A network information dict. This method expects
- it to contain key 'address' (MAC address).
- :return: True if a CNA with the network_info's MAC address exists on
- the instance. False otherwise.
- """
- if self.cnas is None:
- self.cnas = vm.get_cnas(self.adapter, self.instance)
- vifs = self.cnas
-
- return network_info['address'] in [vm.norm_mac(v.mac) for v in vifs]
-
- def execute(self, lpar_wrap):
- # Check to see if the LPAR is OK to add VIFs to.
- modifiable, reason = lpar_wrap.can_modify_io()
- if not modifiable:
- LOG.error("Unable to create VIF(s) for instance in the system's "
- "current state. The reason from the system is: %s",
- reason, instance=self.instance)
- raise exception.VirtualInterfaceCreateException()
-
- # We will have two types of network infos. One is for newly created
- # vifs. The others are those that exist, but should be re-'treated'
- for network_info in self.network_infos:
- if self._vif_exists(network_info):
- self.update_network_infos.append(network_info)
- else:
- self.crt_network_infos.append(network_info)
-
- # If there are no vifs to create or update, then just exit immediately.
- if not self.crt_network_infos and not self.update_network_infos:
- return []
-
- # For existing VIFs that we just need to update, run the plug but do
- # not wait for the neutron event as that likely won't be sent (it was
- # already done).
- for network_info in self.update_network_infos:
- LOG.info("Updating VIF with mac %s for instance.",
- network_info['address'], instance=self.instance)
- vif.plug(self.adapter, self.instance, network_info, new_vif=False)
-
- # For the new VIFs, run the creates (and wait for the events back)
- try:
- with self.virt_api.wait_for_instance_event(
- self.instance, self._get_vif_events(),
- deadline=CONF.vif_plugging_timeout,
- error_callback=self._vif_callback_failed):
- for network_info in self.crt_network_infos:
- LOG.info('Creating VIF with mac %s for instance.',
- network_info['address'], instance=self.instance)
- new_vif = vif.plug(
- self.adapter, self.instance, network_info,
- new_vif=True)
- if self.cnas is not None:
- self.cnas.append(new_vif)
- except eventlet.timeout.Timeout:
- LOG.error('Error waiting for VIF to be created for instance.',
- instance=self.instance)
- raise exception.VirtualInterfaceCreateException()
-
- return self.cnas
-
- def _vif_callback_failed(self, event_name, instance):
- LOG.error('VIF Plug failure for callback on event %s for instance.',
- event_name, instance=self.instance)
- if CONF.vif_plugging_is_fatal:
- raise exception.VirtualInterfaceCreateException()
-
- def _get_vif_events(self):
- """Returns the VIF events that need to be received for a VIF plug.
-
- In order for a VIF plug to be successful, certain events should be
- received from other components within the OpenStack ecosystem. This
- method returns the events neutron needs for a given deploy.
- """
- # See libvirt's driver.py -> _get_neutron_events method for
- # more information.
- if CONF.vif_plugging_is_fatal and CONF.vif_plugging_timeout:
- return [('network-vif-plugged', network_info['id'])
- for network_info in self.crt_network_infos
- if not network_info.get('active', True)]
-
- def revert(self, lpar_wrap, result, flow_failures):
- if not self.network_infos:
- return
-
- LOG.warning('VIF creation being rolled back for instance.',
- instance=self.instance)
-
- # Get the current adapters on the system
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
- for network_info in self.crt_network_infos:
- try:
- vif.unplug(self.adapter, self.instance, network_info,
- cna_w_list=cna_w_list)
- except Exception:
- LOG.exception("An exception occurred during an unplug in the "
- "vif rollback. Ignoring.",
- instance=self.instance)
-
-
-class UnplugVifs(task.Task):
-
- """The task to unplug Virtual Network Interfaces from a VM."""
-
- def __init__(self, adapter, instance, network_infos):
- """Create the task.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param network_infos: The network information containing the nova
- VIFs to remove.
- """
- self.adapter = adapter
- self.instance = instance
- self.network_infos = network_infos or []
-
- super(UnplugVifs, self).__init__(name='unplug_vifs')
-
- def execute(self):
- # If the LPAR is not in an OK state for deleting, then throw an
- # error up front.
- lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
- modifiable, reason = lpar_wrap.can_modify_io()
- if not modifiable:
- LOG.error("Unable to remove VIFs from instance in the system's "
- "current state. The reason reported by the system is: "
- "%s", reason, instance=self.instance)
- raise exception.VirtualInterfaceUnplugException(reason=reason)
-
- # Get all the current Client Network Adapters (CNA) on the VM itself.
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- # Walk through the VIFs and delete the corresponding CNA on the VM.
- for network_info in self.network_infos:
- vif.unplug(self.adapter, self.instance, network_info,
- cna_w_list=cna_w_list)
-
-
-class PlugMgmtVif(task.Task):
-
- """The task to plug the Management VIF into a VM."""
-
- def __init__(self, adapter, instance):
- """Create the task.
-
- Requires 'vm_cnas' from PlugVifs. If None, this Task will retrieve the
- VM's list of CNAs.
-
- Provides the mgmt_cna. This may be None if no management device was
- created. This is the CNA of the mgmt vif for the VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- """
- self.adapter = adapter
- self.instance = instance
-
- super(PlugMgmtVif, self).__init__(
- name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
-
- def execute(self, vm_cnas):
- LOG.info('Plugging the Management Network Interface to instance.',
- instance=self.instance)
- # Determine if we need to create the secure RMC VIF. This should only
- # be needed if there is not a VIF on the secure RMC vSwitch
- vswitch = None
- vswitches = pvm_net.VSwitch.search(
- self.adapter, parent_type=pvm_ms.System.schema_type,
- parent_uuid=self.adapter.sys_uuid, name=SECURE_RMC_VSWITCH)
- if len(vswitches) == 1:
- vswitch = vswitches[0]
-
- if vswitch is None:
- LOG.warning('No management VIF created for instance due to lack '
- 'of Management Virtual Switch', instance=self.instance)
- return None
-
- # This next check verifies that there are no existing NICs on the
- # vSwitch, so that the VM does not end up with multiple RMC VIFs.
- if vm_cnas is None:
- has_mgmt_vif = vm.get_cnas(self.adapter, self.instance,
- vswitch_uri=vswitch.href)
- else:
- has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas]
-
- if has_mgmt_vif:
- LOG.debug('Management VIF already created for instance',
- instance=self.instance)
- return None
-
- lpar_uuid = vm.get_pvm_uuid(self.instance)
- return pvm_cna.crt_cna(self.adapter, None, lpar_uuid, SECURE_RMC_VLAN,
- vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True)
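The event list built by _get_vif_events drives the wait in execute(). A standalone illustration with two new VIFs, one of which neutron already reports as active (sample data):

    crt_network_infos = [
        {'id': 'vif-1', 'active': False},
        {'id': 'vif-2', 'active': True},
    ]
    events = [('network-vif-plugged', ni['id'])
              for ni in crt_network_infos
              if not ni.get('active', True)]
    assert events == [('network-vif-plugged', 'vif-1')]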
diff --git a/nova/virt/powervm/tasks/storage.py b/nova/virt/powervm/tasks/storage.py
deleted file mode 100644
index 24449a1bef..0000000000
--- a/nova/virt/powervm/tasks/storage.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import scsi_mapper as pvm_smap
-from taskflow import task
-from taskflow.types import failure as task_fail
-
-from nova import exception
-from nova.virt import block_device
-from nova.virt.powervm import media
-from nova.virt.powervm import mgmt
-
-LOG = logging.getLogger(__name__)
-
-
-class AttachVolume(task.Task):
-
- """The task to attach a volume to an instance."""
-
- def __init__(self, vol_drv):
- """Create the task.
-
- :param vol_drv: The volume driver. Ties the storage to a connection
- type (ex. vSCSI).
- """
- self.vol_drv = vol_drv
- self.vol_id = block_device.get_volume_id(self.vol_drv.connection_info)
-
- super(AttachVolume, self).__init__(name='attach_vol_%s' % self.vol_id)
-
- def execute(self):
- LOG.info('Attaching volume %(vol)s.', {'vol': self.vol_id},
- instance=self.vol_drv.instance)
- self.vol_drv.attach_volume()
-
- def revert(self, result, flow_failures):
- LOG.warning('Rolling back attachment for volume %(vol)s.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
-
- # Note that the rollback is *instant*. Resetting the FeedTask ensures
- # immediate rollback.
- self.vol_drv.reset_stg_ftsk()
- try:
- # We attempt to detach in case we 'partially attached'. In
- # the attach scenario, perhaps one of the Virtual I/O Servers
- # was attached. This attempts to clear anything out to make sure
- # the terminate attachment runs smoothly.
- self.vol_drv.detach_volume()
- except exception.VolumeDetachFailed:
- # Does not block due to being in the revert flow.
- LOG.exception("Unable to detach volume %s during rollback.",
- self.vol_id, instance=self.vol_drv.instance)
-
-
-class DetachVolume(task.Task):
-
- """The task to detach a volume from an instance."""
-
- def __init__(self, vol_drv):
- """Create the task.
-
- :param vol_drv: The volume driver. Ties the storage to a connection
- type (ex. vSCSI).
- """
- self.vol_drv = vol_drv
- self.vol_id = self.vol_drv.connection_info['data']['volume_id']
-
- super(DetachVolume, self).__init__(name='detach_vol_%s' % self.vol_id)
-
- def execute(self):
- LOG.info('Detaching volume %(vol)s.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
- self.vol_drv.detach_volume()
-
- def revert(self, result, flow_failures):
- LOG.warning('Reattaching volume %(vol)s on detach rollback.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
-
-        # Note that the rollback is *instant*: resetting the FeedTask makes
-        # the reattach below run immediately instead of being deferred.
- self.vol_drv.reset_stg_ftsk()
- try:
- # We try to reattach the volume here so that it maintains its
- # linkage (in the hypervisor) to the VM. This makes it easier for
- # operators to understand the linkage between the VMs and volumes
- # in error scenarios. This is simply useful for debug purposes
- # if there is an operational error.
- self.vol_drv.attach_volume()
- except exception.VolumeAttachFailed:
- # Does not block due to being in the revert flow. See above.
- LOG.exception("Unable to reattach volume %s during rollback.",
- self.vol_id, instance=self.vol_drv.instance)
-
-
-class CreateDiskForImg(task.Task):
-
- """The Task to create the disk from an image in the storage."""
-
- def __init__(self, disk_dvr, context, instance, image_meta):
- """Create the Task.
-
- Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr
- create_disk_from_image method.
-
- :param disk_dvr: The storage driver.
- :param context: The context passed into the driver method.
- :param instance: The nova instance.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- """
- super(CreateDiskForImg, self).__init__(
- name='create_disk_from_img', provides='disk_dev_info')
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.context = context
- self.image_meta = image_meta
-
- def execute(self):
- return self.disk_dvr.create_disk_from_image(
- self.context, self.instance, self.image_meta)
-
- def revert(self, result, flow_failures):
-        # If there is no result, or it's a direct failure, then there isn't
-        # anything to delete.
- if result is None or isinstance(result, task_fail.Failure):
- return
-
-        # Run the delete. The result is a single disk; wrap it in a list
-        # since the method operates on multiple disks.
- try:
- self.disk_dvr.delete_disks([result])
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Disk deletion failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class AttachDisk(task.Task):
-
- """The task to attach the disk to the instance."""
-
- def __init__(self, disk_dvr, instance, stg_ftsk):
- """Create the Task for the attach disk to instance method.
-
-        Requires the disk_dev_info input (provided by the
-        CreateDiskForImg task).
-
- :param disk_dvr: The disk driver.
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(AttachDisk, self).__init__(
- name='attach_disk', requires=['disk_dev_info'])
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.stg_ftsk = stg_ftsk
-
- def execute(self, disk_dev_info):
- self.disk_dvr.attach_disk(self.instance, disk_dev_info, self.stg_ftsk)
-
- def revert(self, disk_dev_info, result, flow_failures):
- try:
- self.disk_dvr.detach_disk(self.instance)
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Disk detach failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class DetachDisk(task.Task):
-
- """The task to detach the disk storage from the instance."""
-
- def __init__(self, disk_dvr, instance):
- """Creates the Task to detach the storage adapters.
-
-        Provides the stor_adpt_mappings: a list of pypowervm
-        VSCSIMappings or VFCMappings (depending on the storage adapter).
-
- :param disk_dvr: The DiskAdapter for the VM.
- :param instance: The nova instance.
- """
- super(DetachDisk, self).__init__(
- name='detach_disk', provides='stor_adpt_mappings')
- self.instance = instance
- self.disk_dvr = disk_dvr
-
- def execute(self):
- return self.disk_dvr.detach_disk(self.instance)
-
-
-class DeleteDisk(task.Task):
-
- """The task to delete the backing storage."""
-
- def __init__(self, disk_dvr):
- """Creates the Task to delete the disk storage from the system.
-
- Requires the stor_adpt_mappings.
-
- :param disk_dvr: The DiskAdapter for the VM.
- """
- super(DeleteDisk, self).__init__(
- name='delete_disk', requires=['stor_adpt_mappings'])
- self.disk_dvr = disk_dvr
-
- def execute(self, stor_adpt_mappings):
- self.disk_dvr.delete_disks(stor_adpt_mappings)
-
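# Aside: DetachDisk provides 'stor_adpt_mappings' and DeleteDisk requires
# it; in a linear taskflow flow the engine passes that value between them
# automatically. A hedged sketch of the wiring (driver plumbing
# simplified; disk_dvr and instance are assumed to come from the caller):
#
#     from taskflow import engines
#     from taskflow.patterns import linear_flow
#
#     flow = linear_flow.Flow('destroy_disks')
#     flow.add(DetachDisk(disk_dvr, instance))  # provides the mappings
#     flow.add(DeleteDisk(disk_dvr))            # consumes the mappings
#     engines.run(flow)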
-
-class CreateAndConnectCfgDrive(task.Task):
-
- """The task to create the config drive."""
-
- def __init__(self, adapter, instance, injected_files,
- network_info, stg_ftsk, admin_pass=None):
- """Create the Task that creates and connects the config drive.
-
- Requires the 'mgmt_cna'
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
-        :param admin_pass: (Optional, Default: None) Password to inject
-                           for the VM.
- """
- super(CreateAndConnectCfgDrive, self).__init__(
- name='cfg_drive', requires=['mgmt_cna'])
- self.adapter = adapter
- self.instance = instance
- self.injected_files = injected_files
- self.network_info = network_info
- self.stg_ftsk = stg_ftsk
- self.ad_pass = admin_pass
- self.mb = None
-
- def execute(self, mgmt_cna):
- self.mb = media.ConfigDrivePowerVM(self.adapter)
- self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
- self.network_info, self.stg_ftsk,
- admin_pass=self.ad_pass, mgmt_cna=mgmt_cna)
-
- def revert(self, mgmt_cna, result, flow_failures):
- # No media builder, nothing to do
- if self.mb is None:
- return
-
- # Delete the virtual optical media. We don't care if it fails
- try:
- self.mb.dlt_vopt(self.instance, self.stg_ftsk)
- except pvm_exc.Error:
- LOG.exception('VOpt removal (as part of reversion) failed.',
- instance=self.instance)
-
-
-class DeleteVOpt(task.Task):
-
- """The task to delete the virtual optical."""
-
- def __init__(self, adapter, instance, stg_ftsk=None):
- """Creates the Task to delete the instance's virtual optical media.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(DeleteVOpt, self).__init__(name='vopt_delete')
- self.adapter = adapter
- self.instance = instance
- self.stg_ftsk = stg_ftsk
-
- def execute(self):
- media_builder = media.ConfigDrivePowerVM(self.adapter)
- media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
-
-
-class InstanceDiskToMgmt(task.Task):
-
- """The task to connect an instance's disk to the management partition.
-
- This task will connect the instance's disk to the management partition and
- discover it. We do these two pieces together because their reversion
- happens in the same order.
- """
-
- def __init__(self, disk_dvr, instance):
- """Create the Task for connecting boot disk to mgmt partition.
-
- Provides:
- stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
- connected.
- vios_wrap: The Virtual I/O Server wrapper from which the storage
- element was mapped.
- disk_path: The local path to the mapped-and-discovered device, e.g.
- '/dev/sde'.
-
- :param disk_dvr: The disk driver.
- :param instance: The nova instance whose boot disk is to be connected.
- """
- super(InstanceDiskToMgmt, self).__init__(
- name='instance_disk_to_mgmt',
- provides=['stg_elem', 'vios_wrap', 'disk_path'])
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.stg_elem = None
- self.vios_wrap = None
- self.disk_path = None
-
- def execute(self):
- """Map the instance's boot disk and discover it."""
-
- # Search for boot disk on the NovaLink partition.
- if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
- dev_name = self.disk_dvr.get_bootdisk_path(
- self.instance, self.disk_dvr.mp_uuid)
- if dev_name is not None:
- return None, None, dev_name
-
- self.stg_elem, self.vios_wrap = (
- self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
- new_maps = pvm_smap.find_maps(
- self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
- stg_elem=self.stg_elem)
- if not new_maps:
- raise exception.NewMgmtMappingNotFoundException(
- stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)
-
- # new_maps should be length 1, but even if it's not - i.e. we somehow
- # matched more than one mapping of the same dev to the management
- # partition from the same VIOS - it is safe to use the first one.
- mapping = new_maps[0]
- # Scan the SCSI bus, discover the disk, find its canonical path.
- LOG.info("Discovering device and path for mapping of %(dev_name)s "
- "on the management partition.",
- {'dev_name': self.stg_elem.name}, instance=self.instance)
- self.disk_path = mgmt.discover_vscsi_disk(mapping)
- return self.stg_elem, self.vios_wrap, self.disk_path
-
- def revert(self, result, flow_failures):
- """Unmap the disk and then remove it from the management partition.
-
- We use this order to avoid rediscovering the device in case some other
- thread scans the SCSI bus between when we remove and when we unmap.
- """
- if self.vios_wrap is None or self.stg_elem is None:
- # We never even got connected - nothing to do.
- return
- LOG.warning("Unmapping boot disk %(disk_name)s from the management "
- "partition via Virtual I/O Server %(vioname)s.",
- {'disk_name': self.stg_elem.name,
- 'vioname': self.vios_wrap.name}, instance=self.instance)
- self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
- self.stg_elem.name)
-
- if self.disk_path is None:
- # We did not discover the disk - nothing else to do.
- return
- LOG.warning("Removing disk %(dpath)s from the management partition.",
- {'dpath': self.disk_path}, instance=self.instance)
- try:
- mgmt.remove_block_dev(self.disk_path)
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Remove disk failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class RemoveInstanceDiskFromMgmt(task.Task):
-
- """Unmap and remove an instance's boot disk from the mgmt partition."""
-
- def __init__(self, disk_dvr, instance):
- """Create task to unmap and remove an instance's boot disk from mgmt.
-
- Requires (from InstanceDiskToMgmt):
- stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
- connected.
- vios_wrap: The Virtual I/O Server wrapper.
- (pypowervm.wrappers.virtual_io_server.VIOS) from which the
- storage element was mapped.
- disk_path: The local path to the mapped-and-discovered device, e.g.
- '/dev/sde'.
- :param disk_dvr: The disk driver.
- :param instance: The nova instance whose boot disk is to be connected.
- """
- self.disk_dvr = disk_dvr
- self.instance = instance
- super(RemoveInstanceDiskFromMgmt, self).__init__(
- name='remove_inst_disk_from_mgmt',
- requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
- def execute(self, stg_elem, vios_wrap, disk_path):
- """Unmap and remove an instance's boot disk from the mgmt partition.
-
- Input parameters ('requires') provided by InstanceDiskToMgmt task.
- :param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
- to be disconnected.
- :param vios_wrap: The Virtual I/O Server wrapper from which the
- mapping is to be removed.
- :param disk_path: The local path to the disk device to be removed, e.g.
- '/dev/sde'
- """
- # stg_elem is None if boot disk was not mapped to management partition.
- if stg_elem is None:
- return
- LOG.info("Unmapping boot disk %(disk_name)s from the management "
- "partition via Virtual I/O Server %(vios_name)s.",
- {'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
- instance=self.instance)
- self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
- LOG.info("Removing disk %(disk_path)s from the management partition.",
- {'disk_path': disk_path}, instance=self.instance)
- mgmt.remove_block_dev(disk_path)
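
These storage tasks lean on taskflow's revert protocol: when a later task in a
flow fails, each completed task's revert() runs in reverse order, which is why
AttachVolume and DetachVolume above undo their own work defensively. A
minimal, runnable sketch of that contract (hypothetical task names, not part
of the deleted module):

    from taskflow import engines, task
    from taskflow.patterns import linear_flow

    class Attach(task.Task):
        def execute(self):
            print('attached')

        def revert(self, result, flow_failures):
            # Invoked only because a later task in the flow failed.
            print('rolling back attach')

    class Boom(task.Task):
        def execute(self):
            raise RuntimeError('simulated failure')

    flow = linear_flow.Flow('demo').add(Attach(), Boom())
    try:
        engines.run(flow)
    except RuntimeError:
        pass  # Attach.revert() already ran before the error propagated.
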
diff --git a/nova/virt/powervm/tasks/vm.py b/nova/virt/powervm/tasks/vm.py
deleted file mode 100644
index 59cd1a2377..0000000000
--- a/nova/virt/powervm/tasks/vm.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import storage as pvm_stg
-from taskflow import task
-from taskflow.types import failure as task_fail
-
-from nova.virt.powervm import vm
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Get(task.Task):
-
- """The task for getting a VM entry."""
-
- def __init__(self, adapter, instance):
- """Creates the Task for getting a VM entry.
-
- Provides the 'lpar_wrap' for other tasks.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- """
- super(Get, self).__init__(name='get_vm', provides='lpar_wrap')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- return vm.get_instance_wrapper(self.adapter, self.instance)
-
-
-class Create(task.Task):
- """The task for creating a VM."""
-
- def __init__(self, adapter, host_wrapper, instance, stg_ftsk):
- """Creates the Task for creating a VM.
-
- The revert method only needs to do something for failed rebuilds.
-        Since the rebuild and build methods have different flows, it is
-        necessary to clean up the destination LPAR on failures during rebuild.
-
- The revert method is not implemented for build because the compute
- manager calls the driver destroy operation for spawn errors. By
- not deleting the lpar, it's a cleaner flow through the destroy
- operation and accomplishes the same result.
-
- Any stale storage associated with the new VM's (possibly recycled) ID
- will be cleaned up. The cleanup work will be delegated to the FeedTask
- represented by the stg_ftsk parameter.
-
- :param adapter: The adapter for the pypowervm API
- :param host_wrapper: The managed system wrapper
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(Create, self).__init__(name='crt_vm', provides='lpar_wrap')
- self.instance = instance
- self.adapter = adapter
- self.host_wrapper = host_wrapper
- self.stg_ftsk = stg_ftsk
-
- def execute(self):
- wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance)
- # Get rid of any stale storage and/or mappings associated with the new
- # LPAR's ID, so it doesn't accidentally have access to something it
- # oughtn't.
- LOG.info('Scrubbing stale storage.', instance=self.instance)
- pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
- lpars_exist=True)
- return wrap
-
-
-class PowerOn(task.Task):
- """The task to power on the instance."""
-
- def __init__(self, adapter, instance):
- """Create the Task for the power on of the LPAR.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- """
- super(PowerOn, self).__init__(name='pwr_vm')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- vm.power_on(self.adapter, self.instance)
-
- def revert(self, result, flow_failures):
- if isinstance(result, task_fail.Failure):
- # The power on itself failed...can't power off.
- LOG.debug('Power on failed. Not performing power off.',
- instance=self.instance)
- return
-
- LOG.warning('Powering off instance.', instance=self.instance)
- try:
- vm.power_off(self.adapter, self.instance, force_immediate=True)
- except pvm_exc.Error:
- # Don't raise revert exceptions
- LOG.exception("Power-off failed during revert.",
- instance=self.instance)
-
-
-class PowerOff(task.Task):
- """The task to power off a VM."""
-
- def __init__(self, adapter, instance, force_immediate=False):
- """Creates the Task to power off an LPAR.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :param force_immediate: Boolean. Perform a VSP hard power off.
- """
- super(PowerOff, self).__init__(name='pwr_off_vm')
- self.instance = instance
- self.adapter = adapter
- self.force_immediate = force_immediate
-
- def execute(self):
- vm.power_off(self.adapter, self.instance,
- force_immediate=self.force_immediate)
-
-
-class Delete(task.Task):
- """The task to delete the instance from the system."""
-
- def __init__(self, adapter, instance):
- """Create the Task to delete the VM from the system.
-
- :param adapter: The adapter for the pypowervm API.
- :param instance: The nova instance.
- """
- super(Delete, self).__init__(name='dlt_vm')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- vm.delete_lpar(self.adapter, self.instance)
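
The driver strung these VM tasks together into linear flows; a condensed,
hedged sketch of how a spawn-style flow would chain Create and PowerOn
(variable names hypothetical; the real wiring lived in the driver module):

    from taskflow import engines
    from taskflow.patterns import linear_flow as lf

    # adapter, host_wrapper, instance and stg_ftsk come from the driver.
    flow = lf.Flow('spawn')
    flow.add(Create(adapter, host_wrapper, instance, stg_ftsk))  # provides lpar_wrap
    flow.add(PowerOn(adapter, instance))
    # If PowerOn fails, earlier tasks that define revert() are rolled back.
    engines.run(flow)
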
diff --git a/nova/virt/powervm/vif.py b/nova/virt/powervm/vif.py
deleted file mode 100644
index 8ab591a15d..0000000000
--- a/nova/virt/powervm/vif.py
+++ /dev/null
@@ -1,373 +0,0 @@
-# Copyright 2016, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import importutils
-from pypowervm import exceptions as pvm_ex
-from pypowervm.tasks import cna as pvm_cna
-from pypowervm.tasks import partition as pvm_par
-from pypowervm.wrappers import event as pvm_evt
-
-from nova import exception
-from nova.network import model as network_model
-from nova.virt.powervm import vm
-
-LOG = log.getLogger(__name__)
-
-NOVALINK_VSWITCH = 'NovaLinkVEABridge'
-
-# Provider tag for custom events from this module
-EVENT_PROVIDER_ID = 'NOVA_PVM_VIF'
-
-VIF_TYPE_PVM_SEA = 'pvm_sea'
-VIF_TYPE_PVM_OVS = 'ovs'
-VIF_MAPPING = {VIF_TYPE_PVM_SEA:
- 'nova.virt.powervm.vif.PvmSeaVifDriver',
- VIF_TYPE_PVM_OVS:
- 'nova.virt.powervm.vif.PvmOvsVifDriver'}
-
-
-def _build_vif_driver(adapter, instance, vif):
- """Returns the appropriate VIF Driver for the given VIF.
-
- :param adapter: The pypowervm adapter API interface.
- :param instance: The nova instance.
- :param vif: The virtual interface.
- :return: The appropriate PvmVifDriver for the VIF.
- """
- if vif.get('type') is None:
- LOG.exception("Failed to build vif driver. Missing vif type.",
- instance=instance)
- raise exception.VirtualInterfacePlugException()
-
- # Check the type to the implementations
- if VIF_MAPPING.get(vif['type']):
- return importutils.import_object(
- VIF_MAPPING.get(vif['type']), adapter, instance)
-
- # No matching implementation, raise error.
- LOG.exception("Failed to build vif driver. Invalid vif type provided.",
- instance=instance)
- raise exception.VirtualInterfacePlugException()
-
-
-def _push_vif_event(adapter, action, vif_w, instance, vif_type):
- """Push a custom event to the REST server for a vif action (plug/unplug).
-
- This event prompts the neutron agent to mark the port up or down. It is
- consumed by custom neutron agents (e.g. Shared Ethernet Adapter)
-
- :param adapter: The pypowervm adapter.
- :param action: The action taken on the vif - either 'plug' or 'unplug'
- :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.)
- :param instance: The nova instance for the event
- :param vif_type: The type of event source (pvm_sea, ovs, bridge,
- pvm_sriov etc)
- """
- data = vif_w.href
- detail = jsonutils.dumps(dict(provider=EVENT_PROVIDER_ID, action=action,
- mac=vif_w.mac, type=vif_type))
- event = pvm_evt.Event.bld(adapter, data, detail)
- try:
- event = event.create()
- LOG.debug('Pushed custom event for consumption by neutron agent: %s',
- str(event), instance=instance)
- except Exception:
- with excutils.save_and_reraise_exception(logger=LOG):
- LOG.exception('Custom VIF event push failed. %s', str(event),
- instance=instance)
-
-
-def plug(adapter, instance, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance object.
- :param vif: The virtual interface to plug into the instance.
-    :param new_vif: (Optional, Default: True) If set, indicates that it is
-                    a brand new VIF. If False, the VIF already exists on
-                    the client VM but its bridge-side plumbing should
-                    still be set up.
- :return: The wrapper (CNA) representing the plugged virtual network. None
- if the vnet was not created.
- """
- vif_drv = _build_vif_driver(adapter, instance, vif)
-
- try:
- vnet_w = vif_drv.plug(vif, new_vif=new_vif)
- except pvm_ex.HttpError:
- LOG.exception('VIF plug failed for instance.', instance=instance)
- raise exception.VirtualInterfacePlugException()
- # Other exceptions are (hopefully) custom VirtualInterfacePlugException
- # generated lower in the call stack.
-
- # Push a custom event if we really plugged the vif
- if vnet_w is not None:
- _push_vif_event(adapter, 'plug', vnet_w, instance, vif['type'])
-
- return vnet_w
-
-
-def unplug(adapter, instance, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance object.
- :param vif: The virtual interface to plug into the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- """
- vif_drv = _build_vif_driver(adapter, instance, vif)
- try:
- vnet_w = vif_drv.unplug(vif, cna_w_list=cna_w_list)
- except pvm_ex.HttpError as he:
- LOG.exception('VIF unplug failed for instance', instance=instance)
- raise exception.VirtualInterfaceUnplugException(reason=he.args[0])
-
- # Push a custom event if we successfully unplugged the vif.
- if vnet_w:
- _push_vif_event(adapter, 'unplug', vnet_w, instance, vif['type'])
-
-
-class PvmVifDriver(metaclass=abc.ABCMeta):
- """Represents an abstract class for a PowerVM Vif Driver.
-
- A VIF Driver understands a given virtual interface type (network). It
- understands how to plug and unplug a given VIF for a virtual machine.
- """
-
- def __init__(self, adapter, instance):
- """Initializes a VIF Driver.
- :param adapter: The pypowervm adapter API interface.
- :param instance: The nova instance that the vif action will be run
- against.
- """
- self.adapter = adapter
- self.instance = instance
-
- @abc.abstractmethod
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- :param vif: The virtual interface to plug into the instance.
-        :param new_vif: (Optional, Default: True) If set, indicates that it is
-                        a brand new VIF. If False, the VIF already exists
-                        on the client VM but its bridge-side plumbing
-                        should still be set up.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
- pass
-
- def unplug(self, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- :param vif: The virtual interface to plug into the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- :return cna_w: The deleted Client Network Adapter or None if the CNA
- is not found.
- """
- # This is a default implementation that most implementations will
- # require.
-
- # Need to find the adapters if they were not provided
- if not cna_w_list:
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
- if not cna_w:
- LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
- 'not found on the instance.',
- {'mac': vif['address']}, instance=self.instance)
- return None
-
- LOG.info('Deleting VIF with mac %(mac)s.',
- {'mac': vif['address']}, instance=self.instance)
- try:
- cna_w.delete()
- except Exception as e:
- LOG.exception('Unable to unplug VIF with mac %(mac)s.',
- {'mac': vif['address']}, instance=self.instance)
- raise exception.VirtualInterfaceUnplugException(
- reason=str(e))
- return cna_w
-
- @staticmethod
- def _find_cna_for_vif(cna_w_list, vif):
- """Finds the PowerVM CNA for a given Nova VIF.
-
- :param cna_w_list: The list of Client Network Adapter wrappers from
- pypowervm.
- :param vif: The Nova Virtual Interface (virtual network interface).
- :return: The CNA that corresponds to the VIF. None if one is not
- part of the cna_w_list.
- """
- for cna_w in cna_w_list:
- if vm.norm_mac(cna_w.mac) == vif['address']:
- return cna_w
- return None
-
-
-class PvmOvsVifDriver(PvmVifDriver):
- """The Open vSwitch VIF driver for PowerVM."""
-
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- Creates a 'peer to peer' connection between the Management partition
- hosting the Linux I/O and the client VM. There will be one trunk
- adapter for a given client adapter.
-
- The device will be 'up' on the mgmt partition.
-
- Will make sure that the trunk device has the appropriate metadata (e.g.
- port id) set on it so that the Open vSwitch agent picks it up properly.
-
- :param vif: The virtual interface to plug into the instance.
-        :param new_vif: (Optional, Default: True) If set, indicates that it is
-                        a brand new VIF. If False, the VIF already exists
-                        on the client VM but its bridge-side plumbing
-                        should still be set up.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
-
- # Create the trunk and client adapter.
- lpar_uuid = vm.get_pvm_uuid(self.instance)
- mgmt_uuid = pvm_par.get_this_partition(self.adapter).uuid
-
- mtu = vif['network'].get_meta('mtu')
- if 'devname' in vif:
- dev_name = vif['devname']
- else:
- dev_name = ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
-
- meta_attrs = ','.join([
- 'iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id']),
- 'iface-status=active',
- 'attached-mac=%s' % vif['address'],
- 'vm-uuid=%s' % self.instance.uuid])
-
- if new_vif:
- return pvm_cna.crt_p2p_cna(
- self.adapter, None, lpar_uuid, [mgmt_uuid], NOVALINK_VSWITCH,
- crt_vswitch=True, mac_addr=vif['address'], dev_name=dev_name,
- ovs_bridge=vif['network']['bridge'],
- ovs_ext_ids=meta_attrs, configured_mtu=mtu)[0]
- else:
- # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548
- # When a host is rebooted, something is discarding tap devices for
- # VMs deployed with OVS vif. To prevent VMs losing network
- # connectivity, this is fixed by recreating the tap devices during
- # init of the nova compute service, which will call vif plug with
- # new_vif==False.
-
- # Find the CNA for this vif.
- # TODO(esberglu) improve performance by caching VIOS wrapper(s) and
- # CNA lists (in case >1 vif per VM).
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
- if not cna_w:
- LOG.warning('Unable to plug VIF with mac %s for instance. The '
- 'VIF was not found on the instance.',
- vif['address'], instance=self.instance)
- return None
-
- # Find the corresponding trunk adapter
- trunks = pvm_cna.find_trunks(self.adapter, cna_w)
- for trunk in trunks:
- # Set MTU, OVS external ids, and OVS bridge metadata
- trunk.configured_mtu = mtu
- trunk.ovs_ext_ids = meta_attrs
- trunk.ovs_bridge = vif['network']['bridge']
- # Updating the trunk adapter will cause NovaLink to reassociate
- # the tap device.
- trunk.update()
-
- def unplug(self, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- Extends the base implementation, but before calling it will remove
- the adapter from the Open vSwitch and delete the trunk.
-
- :param vif: The virtual interface to plug into the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- :return cna_w: The deleted Client Network Adapter or None if the CNA
- is not found.
- """
- # Need to find the adapters if they were not provided
- if not cna_w_list:
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- # Find the CNA for this vif.
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
-
- if not cna_w:
- LOG.warning('Unable to unplug VIF with mac %s for instance. The '
- 'VIF was not found on the instance.', vif['address'],
- instance=self.instance)
- return None
-
- # Find and delete the trunk adapters
- trunks = pvm_cna.find_trunks(self.adapter, cna_w)
- for trunk in trunks:
- trunk.delete()
-
- # Delete the client CNA
- return super(PvmOvsVifDriver, self).unplug(vif, cna_w_list=cna_w_list)
-
-
-class PvmSeaVifDriver(PvmVifDriver):
- """The PowerVM Shared Ethernet Adapter VIF Driver."""
-
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- This method simply creates the client network adapter into the VM.
-
- :param vif: The virtual interface to plug into the instance.
-        :param new_vif: (Optional, Default: True) If set, indicates that it is
-                        a brand new VIF. If False, the VIF already exists
-                        on the client VM but its bridge-side plumbing
-                        should still be set up.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
- # Do nothing if not a new VIF
- if not new_vif:
- return None
-
- lpar_uuid = vm.get_pvm_uuid(self.instance)
-
-        # CNAs require a VLAN. The networking-powervm neutron agent puts this
- # in the vif details.
- vlan = int(vif['details']['vlan'])
-
- LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan),
- instance=self.instance)
- cna_w = pvm_cna.crt_cna(self.adapter, None, lpar_uuid, vlan,
- mac_addr=vif['address'])
-
- return cna_w
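
The plug/unplug entry points above dispatch on vif['type'] via VIF_MAPPING
and oslo's importutils. A standalone sketch of just the lookup half of that
pattern (runnable without nova; the error type is simplified from
VirtualInterfacePlugException):

    VIF_MAPPING = {
        'pvm_sea': 'nova.virt.powervm.vif.PvmSeaVifDriver',
        'ovs': 'nova.virt.powervm.vif.PvmOvsVifDriver',
    }

    def resolve_vif_driver(vif_type):
        # Mirrors _build_vif_driver's lookup; the real code then hands
        # the path to importutils.import_object(path, adapter, instance).
        path = VIF_MAPPING.get(vif_type)
        if path is None:
            raise LookupError('no PowerVM VIF driver for type %r' % vif_type)
        return path

    print(resolve_vif_driver('ovs'))  # nova.virt.powervm.vif.PvmOvsVifDriver
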
diff --git a/nova/virt/powervm/vm.py b/nova/virt/powervm/vm.py
deleted file mode 100644
index 2e5247551f..0000000000
--- a/nova/virt/powervm/vm.py
+++ /dev/null
@@ -1,543 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import strutils as stru
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tasks import power
-from pypowervm.tasks import power_opts as popts
-from pypowervm.tasks import vterm
-from pypowervm import util as pvm_u
-from pypowervm.utils import lpar_builder as lpar_bldr
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.utils import validation as pvm_vldn
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import logical_partition as pvm_lpar
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import shared_proc_pool as pvm_spp
-
-from nova.compute import power_state
-from nova import conf
-from nova import exception as exc
-from nova.i18n import _
-from nova.virt import hardware
-
-
-CONF = conf.CONF
-LOG = logging.getLogger(__name__)
-
-_POWERVM_STARTABLE_STATE = (pvm_bp.LPARState.NOT_ACTIVATED,)
-_POWERVM_STOPPABLE_STATE = (
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING)
-_POWERVM_TO_NOVA_STATE = {
- pvm_bp.LPARState.MIGRATING_RUNNING: power_state.RUNNING,
- pvm_bp.LPARState.RUNNING: power_state.RUNNING,
- pvm_bp.LPARState.STARTING: power_state.RUNNING,
- # map open firmware state to active since it can be shut down
- pvm_bp.LPARState.OPEN_FIRMWARE: power_state.RUNNING,
- # It is running until it is off.
- pvm_bp.LPARState.SHUTTING_DOWN: power_state.RUNNING,
- # It is running until the suspend completes
- pvm_bp.LPARState.SUSPENDING: power_state.RUNNING,
-
- pvm_bp.LPARState.MIGRATING_NOT_ACTIVE: power_state.SHUTDOWN,
- pvm_bp.LPARState.NOT_ACTIVATED: power_state.SHUTDOWN,
-
- pvm_bp.LPARState.UNKNOWN: power_state.NOSTATE,
- pvm_bp.LPARState.HARDWARE_DISCOVERY: power_state.NOSTATE,
- pvm_bp.LPARState.NOT_AVAILBLE: power_state.NOSTATE,
-
- # While resuming, we should be considered suspended still. Only once
- # resumed will we be active (which is represented by the RUNNING state)
- pvm_bp.LPARState.RESUMING: power_state.SUSPENDED,
- pvm_bp.LPARState.SUSPENDED: power_state.SUSPENDED,
-
- pvm_bp.LPARState.ERROR: power_state.CRASHED}
-
-
-def get_cnas(adapter, instance, **search):
- """Returns the (possibly filtered) current CNAs on the instance.
-
- The Client Network Adapters are the Ethernet adapters for a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param search: Keyword arguments for CNA.search. If omitted, all CNAs are
- returned.
-    :return: The CNA wrappers that represent the ClientNetworkAdapters on the VM
- """
- meth = pvm_net.CNA.search if search else pvm_net.CNA.get
-
- return meth(adapter, parent_type=pvm_lpar.LPAR,
- parent_uuid=get_pvm_uuid(instance), **search)
-
-
-def get_lpar_names(adp):
- """Get a list of the LPAR names.
-
- :param adp: A pypowervm.adapter.Adapter instance for the PowerVM API.
- :return: A list of string names of the PowerVM Logical Partitions.
- """
- return [x.name for x in pvm_lpar.LPAR.search(adp, is_mgmt_partition=False)]
-
-
-def get_pvm_uuid(instance):
- """Get the corresponding PowerVM VM uuid of an instance uuid.
-
-    Maps an OpenStack instance uuid to a PowerVM uuid. The mapping between
-    the Nova instance UUID and the PowerVM UUID is 1 to 1. This method runs
-    the conversion algorithm against the instance's uuid to produce the
-    PowerVM UUID.
-
- :param instance: nova.objects.instance.Instance.
- :return: The PowerVM UUID for the LPAR corresponding to the instance.
- """
- return pvm_uuid.convert_uuid_to_pvm(instance.uuid).upper()
-
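# Aside: a hedged sketch of the 1:1 conversion above. To the best of my
# knowledge pypowervm's convert_uuid_to_pvm clears the high bit of the
# first nibble (PowerVM requires it unset); treat the exact mask as an
# assumption rather than a documented pypowervm contract.
#
#     def to_pvm_uuid_sketch(nova_uuid):
#         first = int(nova_uuid[0], 16) & 0x7  # drop the top bit
#         return ('%x%s' % (first, nova_uuid[1:])).upper()
#
#     # to_pvm_uuid_sketch('d5de3549-...') -> '55DE3549-...'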
-
-def get_instance_wrapper(adapter, instance):
- """Get the LPAR wrapper for a given Nova instance.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :return: The pypowervm logical_partition wrapper.
- """
- pvm_inst_uuid = get_pvm_uuid(instance)
- try:
- return pvm_lpar.LPAR.get(adapter, uuid=pvm_inst_uuid)
- except pvm_exc.Error as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- LOG.exception("Failed to retrieve LPAR associated with instance.",
- instance=instance)
- if e.response is not None and e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=pvm_inst_uuid)
-
-
-def power_on(adapter, instance):
- """Powers on a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to power on.
- :raises: InstancePowerOnFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- entry = get_instance_wrapper(adapter, instance)
- # Get the current state and see if we can start the VM
- if entry.state in _POWERVM_STARTABLE_STATE:
- # Now start the lpar
- try:
- power.power_on(entry, None)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during power_on.",
- instance=instance)
- raise exc.InstancePowerOnFailure(reason=str(e))
-
-
-def power_off(adapter, instance, force_immediate=False, timeout=None):
- """Powers off a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to power off.
- :param timeout: (Optional, Default None) How long to wait for the job
-                    to complete. Defaults to None, which means the default
-                    timeout from pypowervm's power off method is used.
- :param force_immediate: (Optional, Default False) Should it be immediately
- shut down.
- :raises: InstancePowerOffFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- entry = get_instance_wrapper(adapter, instance)
- # Get the current state and see if we can stop the VM
- LOG.debug("Powering off request for instance in state %(state)s. "
- "Force Immediate Flag: %(force)s.",
- {'state': entry.state, 'force': force_immediate},
- instance=instance)
- if entry.state in _POWERVM_STOPPABLE_STATE:
- # Now stop the lpar
- try:
- LOG.debug("Power off executing.", instance=instance)
- kwargs = {'timeout': timeout} if timeout else {}
- if force_immediate:
- power.PowerOp.stop(
- entry, opts=popts.PowerOffOpts().vsp_hard(), **kwargs)
- else:
- power.power_off_progressive(entry, **kwargs)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during power_off.",
- instance=instance)
- raise exc.InstancePowerOffFailure(reason=str(e))
- else:
- LOG.debug("Power off not required for instance %(inst)s.",
- {'inst': instance.name})
-
-
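# Aside: power_on, power_off and reboot all serialize per instance by
# sharing the same lock name. The underlying oslo.concurrency pattern, as
# a standalone sketch (uuid value hypothetical):
#
#     from oslo_concurrency import lockutils
#
#     with lockutils.lock('power_%s' % instance_uuid):
#         ...  # only one power-state transition per instance at a time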
-def reboot(adapter, instance, hard):
- """Reboots a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to reboot.
- :param hard: Boolean True if hard reboot, False otherwise.
- :raises: InstanceRebootFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- try:
- entry = get_instance_wrapper(adapter, instance)
- if entry.state != pvm_bp.LPARState.NOT_ACTIVATED:
- if hard:
- power.PowerOp.stop(
- entry, opts=popts.PowerOffOpts().vsp_hard().restart())
- else:
- power.power_off_progressive(entry, restart=True)
- else:
-                # If we get here, pypowervm thinks the instance is down, so
-                # the reboot amounts to a power on. pypowervm does NOT throw
-                # an exception if the VM is "already down"; any other
-                # exception is a legitimate failure and is left to raise.
- power.power_on(entry, None)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during reboot.", instance=instance)
- raise exc.InstanceRebootFailure(reason=str(e))
-
-
-def delete_lpar(adapter, instance):
- """Delete an LPAR.
-
- :param adapter: The adapter for the pypowervm API.
- :param instance: The nova instance corresponding to the lpar to delete.
- """
- lpar_uuid = get_pvm_uuid(instance)
- # Attempt to delete the VM. To avoid failures due to open vterm, we will
- # attempt to close the vterm before issuing the delete.
- try:
- LOG.info('Deleting virtual machine.', instance=instance)
- # Ensure any vterms are closed. Will no-op otherwise.
- vterm.close_vterm(adapter, lpar_uuid)
- # Run the LPAR delete
- resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
- LOG.info('Virtual machine delete status: %d', resp.status,
- instance=instance)
- return resp
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- if e.response and e.response.status == 404:
- # LPAR is already gone - don't fail
- sare.reraise = False
- LOG.info('Virtual Machine not found', instance=instance)
- else:
- LOG.error('HttpError deleting virtual machine.',
- instance=instance)
- except pvm_exc.Error:
- with excutils.save_and_reraise_exception(logger=LOG):
- # Attempting to close vterm did not help so raise exception
- LOG.error('Virtual machine delete failed: LPARID=%s', lpar_uuid)
-
-
-def create_lpar(adapter, host_w, instance):
-    """Create an LPAR on the given host based on the instance.
-
- :param adapter: The adapter for the pypowervm API.
- :param host_w: The host's System wrapper.
- :param instance: The nova instance.
- :return: The LPAR wrapper response from the API.
- """
- try:
- # Translate the nova flavor into a PowerVM Wrapper Object.
- lpar_b = VMBuilder(host_w, adapter).lpar_builder(instance)
- pending_lpar_w = lpar_b.build()
- # Run validation against it. This is just for nice(r) error messages.
- pvm_vldn.LPARWrapperValidator(pending_lpar_w,
- host_w).validate_all()
- # Create it. The API returns a new wrapper with the actual system data.
- return pending_lpar_w.create(parent=host_w)
- except lpar_bldr.LPARBuilderException as e:
- # Raise the BuildAbortException since LPAR failed to build
- raise exc.BuildAbortException(instance_uuid=instance.uuid, reason=e)
- except pvm_exc.HttpError as he:
- # Raise the API exception
- LOG.exception("PowerVM HttpError creating LPAR.", instance=instance)
- raise exc.PowerVMAPIFailed(inst_name=instance.name, reason=he)
-
-
-def _translate_vm_state(pvm_state):
- """Find the current state of the lpar.
-
- :return: The appropriate integer state value from power_state, converted
- from the PowerVM state.
- """
- if pvm_state is None:
- return power_state.NOSTATE
- try:
- return _POWERVM_TO_NOVA_STATE[pvm_state.lower()]
- except KeyError:
- return power_state.NOSTATE
-
-
-def get_vm_qp(adapter, lpar_uuid, qprop=None, log_errors=True):
- """Returns one or all quick properties of an LPAR.
-
- :param adapter: The pypowervm adapter.
- :param lpar_uuid: The (powervm) UUID for the LPAR.
- :param qprop: The quick property key to return. If specified, that single
- property value is returned. If None/unspecified, all quick
- properties are returned in a dictionary.
- :param log_errors: Indicator whether to log REST data after an exception
- :return: Either a single quick property value or a dictionary of all quick
- properties.
- """
- kwds = dict(root_id=lpar_uuid, suffix_type='quick', suffix_parm=qprop)
- if not log_errors:
- # Remove the log helper from the list of helpers.
- # Note that adapter.helpers returns a copy - the .remove doesn't affect
- # the adapter's original helpers list.
- helpers = adapter.helpers
- try:
- helpers.remove(pvm_log.log_helper)
- except ValueError:
- # It's not an error if we didn't find it.
- pass
- kwds['helpers'] = helpers
- try:
- resp = adapter.read(pvm_lpar.LPAR.schema_type, **kwds)
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- # 404 error indicates the LPAR has been deleted
- if e.response and e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=lpar_uuid)
- # else raise the original exception
- return jsonutils.loads(resp.body)
-
-
-def get_vm_info(adapter, instance):
- """Get the InstanceInfo for an instance.
-
- :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API.
- :param instance: nova.objects.instance.Instance object
- :returns: An InstanceInfo object.
- """
- pvm_uuid = get_pvm_uuid(instance)
- pvm_state = get_vm_qp(adapter, pvm_uuid, 'PartitionState')
- nova_state = _translate_vm_state(pvm_state)
- return hardware.InstanceInfo(nova_state)
-
-
-def norm_mac(mac):
- """Normalizes a MAC address from pypowervm format to OpenStack.
-
- That means that the format will be converted to lower case and will
- have colons added.
-
- :param mac: A pypowervm mac address. Ex. 1234567890AB
- :return: A mac that matches the standard neutron format.
- Ex. 12:34:56:78:90:ab
- """
-    # The replace() strips colons in case the mac is already normalized.
- mac = mac.lower().replace(':', '')
- return ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
-
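# Aside: a quick check of norm_mac's contract; the replace() above makes
# it idempotent on already-normalized input:
#
#     assert norm_mac('1234567890AB') == '12:34:56:78:90:ab'
#     assert norm_mac('12:34:56:78:90:ab') == '12:34:56:78:90:ab'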
-
-class VMBuilder(object):
- """Converts a Nova Instance/Flavor into a pypowervm LPARBuilder."""
- _PVM_PROC_COMPAT = 'powervm:processor_compatibility'
- _PVM_UNCAPPED = 'powervm:uncapped'
- _PVM_DED_SHAR_MODE = 'powervm:dedicated_sharing_mode'
- _PVM_SHAR_PROC_POOL = 'powervm:shared_proc_pool_name'
- _PVM_SRR_CAPABILITY = 'powervm:srr_capability'
-
- # Map of PowerVM extra specs to the lpar builder attributes.
- # '' is used for attributes that are not implemented yet.
- # None means there is no direct attribute mapping and must
- # be handled individually
- _ATTRS_MAP = {
- 'powervm:min_mem': lpar_bldr.MIN_MEM,
- 'powervm:max_mem': lpar_bldr.MAX_MEM,
- 'powervm:min_vcpu': lpar_bldr.MIN_VCPU,
- 'powervm:max_vcpu': lpar_bldr.MAX_VCPU,
- 'powervm:proc_units': lpar_bldr.PROC_UNITS,
- 'powervm:min_proc_units': lpar_bldr.MIN_PROC_U,
- 'powervm:max_proc_units': lpar_bldr.MAX_PROC_U,
- 'powervm:dedicated_proc': lpar_bldr.DED_PROCS,
- 'powervm:shared_weight': lpar_bldr.UNCAPPED_WEIGHT,
- 'powervm:availability_priority': lpar_bldr.AVAIL_PRIORITY,
- _PVM_UNCAPPED: None,
- _PVM_DED_SHAR_MODE: None,
- _PVM_PROC_COMPAT: None,
- _PVM_SHAR_PROC_POOL: None,
- _PVM_SRR_CAPABILITY: None,
- }
-
- _DED_SHARING_MODES_MAP = {
- 'share_idle_procs': pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS,
- 'keep_idle_procs': pvm_bp.DedicatedSharingMode.KEEP_IDLE_PROCS,
- 'share_idle_procs_active':
- pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ACTIVE,
- 'share_idle_procs_always':
- pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ALWAYS,
- }
-
- def __init__(self, host_w, adapter):
- """Initialize the converter.
-
- :param host_w: The host System wrapper.
- :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API.
- """
- self.adapter = adapter
- self.host_w = host_w
- kwargs = dict(proc_units_factor=CONF.powervm.proc_units_factor)
- self.stdz = lpar_bldr.DefaultStandardize(host_w, **kwargs)
-
- def lpar_builder(self, inst):
- """Returns the pypowervm LPARBuilder for a given Nova flavor.
-
- :param inst: the VM instance
- """
- attrs = self._format_flavor(inst)
- # TODO(thorst, efried) Add in IBMi attributes
- return lpar_bldr.LPARBuilder(self.adapter, attrs, self.stdz)
-
- def _format_flavor(self, inst):
- """Returns the pypowervm format of the flavor.
-
- :param inst: The Nova VM instance.
- :return: A dict that can be used by the LPAR builder.
- """
- # The attrs are what is sent to pypowervm to convert the lpar.
- attrs = {
- lpar_bldr.NAME: pvm_u.sanitize_partition_name_for_api(inst.name),
- # The uuid is only actually set on a create of an LPAR
- lpar_bldr.UUID: get_pvm_uuid(inst),
- lpar_bldr.MEM: inst.flavor.memory_mb,
- lpar_bldr.VCPU: inst.flavor.vcpus,
- # Set the srr capability to True by default
- lpar_bldr.SRR_CAPABLE: True}
-
- # Loop through the extra specs and process powervm keys
- for key in inst.flavor.extra_specs.keys():
-            # If it is not a valid key, we can skip it.
- if not self._is_pvm_valid_key(key):
- continue
-
- # Look for the mapping to the lpar builder
- bldr_key = self._ATTRS_MAP.get(key)
-
-            # If there is no direct mapping (the value is None), derive
-            # the complex type.
- if bldr_key is None:
- self._build_complex_type(key, attrs, inst.flavor)
- else:
- # We found a direct mapping
- attrs[bldr_key] = inst.flavor.extra_specs[key]
-
- return attrs
-
- def _is_pvm_valid_key(self, key):
-        """Return whether this is a valid PowerVM key.
-
- :param key: The powervm key.
- :return: True if valid key. False if non-powervm key and should be
- skipped.
- """
- # If not a powervm key, then it is not 'pvm_valid'
- if not key.startswith('powervm:'):
- return False
-
- # Check if this is a valid attribute
- if key not in self._ATTRS_MAP:
- # Could be a key from a future release - warn, but ignore.
- LOG.warning("Unhandled PowerVM key '%s'.", key)
- return False
-
- return True
-
- def _build_complex_type(self, key, attrs, flavor):
- """If a key does not directly map, this method derives the right value.
-
- Some types are complex, in that the flavor may have one key that maps
- to several different attributes in the lpar builder. This method
- handles the complex types.
-
- :param key: The flavor's key.
- :param attrs: The attribute map to put the value into.
- :param flavor: The Nova instance flavor.
-        :return: None; the derived value is stored into attrs.
- """
- # Map uncapped to sharing mode
- if key == self._PVM_UNCAPPED:
- attrs[lpar_bldr.SHARING_MODE] = (
- pvm_bp.SharingMode.UNCAPPED
- if stru.bool_from_string(flavor.extra_specs[key], strict=True)
- else pvm_bp.SharingMode.CAPPED)
- elif key == self._PVM_DED_SHAR_MODE:
- # Dedicated sharing modes...map directly
- shr_mode_key = flavor.extra_specs[key]
- mode = self._DED_SHARING_MODES_MAP.get(shr_mode_key)
- if mode is None:
- raise exc.InvalidParameterValue(err=_(
- "Invalid dedicated sharing mode '%s'!") % shr_mode_key)
- attrs[lpar_bldr.SHARING_MODE] = mode
- elif key == self._PVM_SHAR_PROC_POOL:
- pool_name = flavor.extra_specs[key]
- attrs[lpar_bldr.SPP] = self._spp_pool_id(pool_name)
- elif key == self._PVM_PROC_COMPAT:
- # Handle variants of the supported values
- attrs[lpar_bldr.PROC_COMPAT] = re.sub(
- r'\+', '_Plus', flavor.extra_specs[key])
- elif key == self._PVM_SRR_CAPABILITY:
- attrs[lpar_bldr.SRR_CAPABLE] = stru.bool_from_string(
- flavor.extra_specs[key], strict=True)
- else:
- # There was no mapping or we didn't handle it. This is a BUG!
- raise KeyError(_(
- "Unhandled PowerVM key '%s'! Please report this bug.") % key)
-
- def _spp_pool_id(self, pool_name):
- """Returns the shared proc pool id for a given pool name.
-
- :param pool_name: The shared proc pool name.
- :return: The internal API id for the shared proc pool.
- """
- if (pool_name is None or
- pool_name == pvm_spp.DEFAULT_POOL_DISPLAY_NAME):
- # The default pool is 0
- return 0
-
- # Search for the pool with this name
- pool_wraps = pvm_spp.SharedProcPool.search(
- self.adapter, name=pool_name, parent=self.host_w)
-
- # Check to make sure there is a pool with the name, and only one pool.
- if len(pool_wraps) > 1:
- msg = (_('Multiple Shared Processing Pools with name %(pool)s.') %
- {'pool': pool_name})
- raise exc.ValidationError(msg)
- elif len(pool_wraps) == 0:
- msg = (_('Unable to find Shared Processing Pool %(pool)s') %
- {'pool': pool_name})
- raise exc.ValidationError(msg)
-
- # Return the singular pool id.
- return pool_wraps[0].id
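
To make the flavor handling above concrete, here is a hedged sketch of how a
few powervm extra specs would land in the builder attribute map (the spec
values are hypothetical; the key-to-attribute behavior follows _ATTRS_MAP and
_build_complex_type above):

    extra_specs = {
        'powervm:proc_units': '0.5',        # direct map -> lpar_bldr.PROC_UNITS
        'powervm:uncapped': 'true',         # complex -> SHARING_MODE = UNCAPPED
        'powervm:processor_compatibility': 'POWER8+',  # '+' -> '_Plus'
    }
    # Resulting builder attrs, roughly:
    #   {PROC_UNITS: '0.5',
    #    SHARING_MODE: pvm_bp.SharingMode.UNCAPPED,
    #    PROC_COMPAT: 'POWER8_Plus'}
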
diff --git a/nova/virt/powervm/volume/__init__.py b/nova/virt/powervm/volume/__init__.py
deleted file mode 100644
index bca5ce473b..0000000000
--- a/nova/virt/powervm/volume/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-from nova.i18n import _
-from nova.virt.powervm.volume import fcvscsi
-
-
-def build_volume_driver(adapter, instance, conn_info, stg_ftsk=None):
- drv_type = conn_info.get('driver_volume_type')
- if drv_type != 'fibre_channel':
- reason = _("Invalid connection type of %s") % drv_type
- raise exception.InvalidVolume(reason=reason)
- return fcvscsi.FCVscsiVolumeAdapter(adapter, instance, conn_info,
- stg_ftsk=stg_ftsk)
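
The factory above accepts only Fibre Channel connections. A hedged sketch of
the minimal connection_info shape it inspects (the 'data' keys are
illustrative placeholders; only driver_volume_type is checked here):

    conn_info = {
        'driver_volume_type': 'fibre_channel',  # anything else -> InvalidVolume
        'data': {'volume_id': 'vol-1234'},      # consumed later by the adapter
    }
    # vol_drv = build_volume_driver(adapter, instance, conn_info)
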
diff --git a/nova/virt/powervm/volume/fcvscsi.py b/nova/virt/powervm/volume/fcvscsi.py
deleted file mode 100644
index cf2c6e4d25..0000000000
--- a/nova/virt/powervm/volume/fcvscsi.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from pypowervm import const as pvm_const
-from pypowervm.tasks import hdisk
-from pypowervm.tasks import partition as pvm_tpar
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-from taskflow import task
-
-from nova import conf as cfg
-from nova import exception as exc
-from nova.i18n import _
-from nova.virt import block_device
-from nova.virt.powervm import vm
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-LOCAL_FEED_TASK = 'local_feed_task'
-UDID_KEY = 'target_UDID'
-
-# A global variable that will cache the physical WWPNs on the system.
-_vscsi_pfc_wwpns = None
-
-
-@lockutils.synchronized('vscsi_wwpns')
-def wwpns(adapter):
-    """Returns the physical WWPNs of the adapters that will connect the ports.
-
- :return: The list of WWPNs that need to be included in the zone set.
- """
- return pvm_tpar.get_physical_wwpns(adapter, force_refresh=False)
-
-
-class FCVscsiVolumeAdapter(object):
-
- def __init__(self, adapter, instance, connection_info, stg_ftsk=None):
- """Initialize the PowerVMVolumeAdapter
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance that the volume should attach to.
- :param connection_info: The volume connection info generated from the
- BDM. Used to determine how to attach the
- volume to the VM.
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when the respective method is executed.
- """
- self.adapter = adapter
- self.instance = instance
- self.connection_info = connection_info
- self.vm_uuid = vm.get_pvm_uuid(instance)
- self.reset_stg_ftsk(stg_ftsk=stg_ftsk)
- self._pfc_wwpns = None
-
- @property
- def volume_id(self):
- """Method to return the volume id.
-
- Every driver must implement this method if the default impl will
-        Drivers must override this method if the default implementation
-        does not work for their connection data.
- return block_device.get_volume_id(self.connection_info)
-
- def reset_stg_ftsk(self, stg_ftsk=None):
- """Resets the pypowervm transaction FeedTask to a new value.
-
- The previous updates from the original FeedTask WILL NOT be migrated
- to this new FeedTask.
-
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when this method is executed.
- """
- if stg_ftsk is None:
- getter = pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
- self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter)
- else:
- self.stg_ftsk = stg_ftsk
-
- def _set_udid(self, udid):
- """This method will set the hdisk udid in the connection_info.
-
- :param udid: The hdisk target_udid to be stored in system_metadata
- """
- self.connection_info['data'][UDID_KEY] = udid
-
- def _get_udid(self):
- """This method will return the hdisk udid stored in connection_info.
-
- :return: The target_udid associated with the hdisk
- """
- try:
- return self.connection_info['data'][UDID_KEY]
- except (KeyError, ValueError):
- # It's common to lose our specific data in the BDM. The connection
- # information can be 'refreshed' by operations like live migrate
- # and resize
- LOG.info('Failed to retrieve target_UDID key from BDM for volume '
- 'id %s', self.volume_id, instance=self.instance)
- return None
-
- def attach_volume(self):
- """Attaches the volume."""
-
- # Check if the VM is in a state where the attach is acceptable.
- lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
- capable, reason = lpar_w.can_modify_io()
- if not capable:
- raise exc.VolumeAttachFailed(
- volume_id=self.volume_id, reason=reason)
-
-        # It's about to get weird. The transaction manager has a list of
-        # VIOSes. We could use those, but they only have SCSI mappings (by
-        # design). They do not have storage data (fetching it is expensive).
- #
- # We need the storage xag when we are determining which mappings to
- # add to the system. But we don't want to tie it to the stg_ftsk. If
- # we do, every retry, every etag gather, etc... takes MUCH longer.
- #
- # So we get the VIOSes with the storage xag here, separately, to save
- # the stg_ftsk from potentially having to run it multiple times.
- attach_ftsk = pvm_tx.FeedTask(
- 'attach_volume_to_vio', pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_STOR,
- pvm_const.XAG.VIO_SMAP]))
-
- # Find valid hdisks and map to VM.
- attach_ftsk.add_functor_subtask(
- self._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
-
- ret = attach_ftsk.execute()
-
- # Check the number of VIOSes
- vioses_modified = 0
- for result in ret['wrapper_task_rets'].values():
- if result['vio_modified']:
- vioses_modified += 1
-
- # Validate that a vios was found
- if vioses_modified == 0:
- msg = (_('Failed to discover valid hdisk on any Virtual I/O '
- 'Server for volume %(volume_id)s.') %
- {'volume_id': self.volume_id})
- ex_args = {'volume_id': self.volume_id, 'reason': msg}
- raise exc.VolumeAttachFailed(**ex_args)
-
- self.stg_ftsk.execute()
-
- def _attach_volume_to_vio(self, vios_w):
- """Attempts to attach a volume to a given VIO.
-
- :param vios_w: The Virtual I/O Server wrapper to attach to.
-        :return: True if the volume was attached. False if it was not
-                 (e.g. the Virtual I/O Server may not have connectivity
-                 to the hdisk).
- """
- status, device_name, udid = self._discover_volume_on_vios(vios_w)
-
- if hdisk.good_discovery(status, device_name):
- # Found a hdisk on this Virtual I/O Server. Add the action to
- # map it to the VM when the stg_ftsk is executed.
- with lockutils.lock(self.volume_id):
- self._add_append_mapping(vios_w.uuid, device_name,
- tag=self.volume_id)
-
- # Save the UDID for the disk in the connection info. It is
- # used for the detach.
- self._set_udid(udid)
- LOG.debug('Added deferred task to attach device %(device_name)s '
- 'to vios %(vios_name)s.',
- {'device_name': device_name, 'vios_name': vios_w.name},
- instance=self.instance)
-
- # Valid attachment
- return True
-
- return False
-
- def extend_volume(self):
- # The compute node does not need to take any additional steps for the
- # client to see the extended volume.
- pass
-
- def _discover_volume_on_vios(self, vios_w):
- """Discovers an hdisk on a single vios for the volume.
-
- :param vios_w: VIOS wrapper to process
- :returns: Status of the volume or None
- :returns: Device name or None
- :returns: UDID or None
- """
-        # Get the initiator WWPNs, target WWPNs and LUN for the given VIOS.
- vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
-
- # Build the ITL map and discover the hdisks on the Virtual I/O
- # Server (if any).
- itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
- if len(itls) == 0:
- LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
- {'vios': vios_w.name, 'volume_id': self.volume_id},
- instance=self.instance)
- return None, None, None
-
- status, device_name, udid = hdisk.discover_hdisk(self.adapter,
- vios_w.uuid, itls)
-
- if hdisk.good_discovery(status, device_name):
- LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
- '%(volume_id)s. Status code: %(status)s.',
- {'hdisk': device_name, 'vios': vios_w.name,
- 'volume_id': self.volume_id, 'status': status},
- instance=self.instance)
- elif status == hdisk.LUAStatus.DEVICE_IN_USE:
- LOG.warning('Discovered device %(dev)s for volume %(volume)s '
- 'on %(vios)s is in use. Error code: %(status)s.',
- {'dev': device_name, 'volume': self.volume_id,
- 'vios': vios_w.name, 'status': status},
- instance=self.instance)
-
- return status, device_name, udid
-
- def _get_hdisk_itls(self, vios_w):
- """Returns the mapped ITLs for the hdisk for the given VIOS.
-
-        A PowerVM system may have multiple Virtual I/O Servers to virtualize
-        the I/O to the virtual machines. Each Virtual I/O Server may have its
-        own set of initiator WWPNs, target WWPNs and the LUN on which the
-        hdisk is mapped. This method determines and returns the ITLs for the
-        given VIOS.
-
-        :param vios_w: A Virtual I/O Server wrapper.
-        :return: List of the i_wwpns that are part of the vios_w.
-        :return: List of the t_wwpns that are part of the vios_w.
-        :return: Target LUN id of the hdisk for the vios_w.
- """
- it_map = self.connection_info['data']['initiator_target_map']
- i_wwpns = it_map.keys()
-
- active_wwpns = vios_w.get_active_pfc_wwpns()
- vio_wwpns = [x for x in i_wwpns if x in active_wwpns]
-
- t_wwpns = []
- for it_key in vio_wwpns:
- t_wwpns.extend(it_map[it_key])
- lun = self.connection_info['data']['target_lun']
-
- return vio_wwpns, t_wwpns, lun
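
For reference, _get_hdisk_itls reduces the Cinder-provided
initiator_target_map to just the initiators that are active on the VIOS
being processed. A minimal standalone sketch of that filtering, using
plain dicts and made-up WWPN values (no pypowervm required):

    def itls_for_vios(connection_info, active_wwpns):
        """Pair each initiator WWPN active on this VIOS with its targets."""
        it_map = connection_info['data']['initiator_target_map']
        lun = connection_info['data']['target_lun']
        # Keep only the initiators that are live on this particular VIOS.
        vio_wwpns = [i for i in it_map if i in active_wwpns]
        # Collect every target WWPN reachable from those initiators.
        t_wwpns = []
        for i_wwpn in vio_wwpns:
            t_wwpns.extend(it_map[i_wwpn])
        return vio_wwpns, t_wwpns, lun

    conn_info = {'data': {
        'initiator_target_map': {
            'c05076079cff0e56': ['500507680b215666'],
            'c05076079cff0e57': ['500507680b215667'],
        },
        'target_lun': 1,
    }}
    print(itls_for_vios(conn_info, {'c05076079cff0e56'}))
    # (['c05076079cff0e56'], ['500507680b215666'], 1)
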
-
- def _add_append_mapping(self, vios_uuid, device_name, tag=None):
- """Update the stg_ftsk to append the mapping to the VIOS.
-
- :param vios_uuid: The UUID of the vios for the pypowervm adapter.
- :param device_name: The hdisk device name.
- :param tag: String tag to set on the physical volume.
- """
- def add_func(vios_w):
- LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on "
- "vios %(vios)s.",
- {'dev': device_name, 'vios': vios_w.name},
- instance=self.instance)
- pv = pvm_stor.PV.bld(self.adapter, device_name, tag=tag)
- v_map = tsk_map.build_vscsi_mapping(None, vios_w, self.vm_uuid, pv)
- return tsk_map.add_map(vios_w, v_map)
- self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
-
- def detach_volume(self):
- """Detach the volume."""
-
- # Check if the VM is in a state where the detach is acceptable.
- lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
- capable, reason = lpar_w.can_modify_io()
- if not capable:
- raise exc.VolumeDetachFailed(
- volume_id=self.volume_id, reason=reason)
-
- # Run the detach
- try:
- # See logic in attach_volume for why this new FeedTask is here.
- detach_ftsk = pvm_tx.FeedTask(
- 'detach_volume_from_vio', pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_STOR,
- pvm_const.XAG.VIO_SMAP]))
- # Find hdisks to detach
- detach_ftsk.add_functor_subtask(
- self._detach_vol_for_vio, provides='vio_modified',
- flag_update=False)
-
- ret = detach_ftsk.execute()
-
- # Warn if no hdisks detached.
- if not any([result['vio_modified']
- for result in ret['wrapper_task_rets'].values()]):
- LOG.warning("Detach Volume: Failed to detach the "
- "volume %(volume_id)s on ANY of the Virtual "
- "I/O Servers.", {'volume_id': self.volume_id},
- instance=self.instance)
-
- except Exception as e:
- LOG.exception('PowerVM error detaching volume from virtual '
- 'machine.', instance=self.instance)
- ex_args = {'volume_id': self.volume_id, 'reason': str(e)}
- raise exc.VolumeDetachFailed(**ex_args)
- self.stg_ftsk.execute()
-
- def _detach_vol_for_vio(self, vios_w):
- """Removes the volume from a specific Virtual I/O Server.
-
- :param vios_w: The VIOS wrapper.
- :return: True if a remove action was done against this VIOS. False
- otherwise.
- """
- LOG.debug("Detach volume %(vol)s from vios %(vios)s",
- dict(vol=self.volume_id, vios=vios_w.name),
- instance=self.instance)
- device_name = None
- udid = self._get_udid()
- try:
- if udid:
- # This will only work if vios_w has the Storage XAG.
- device_name = vios_w.hdisk_from_uuid(udid)
-
- if not udid or not device_name:
- # We lost our bdm data. We'll need to discover it.
- status, device_name, udid = self._discover_volume_on_vios(
- vios_w)
-
- # Check if the hdisk is in a bad state in the I/O Server.
- # Subsequent scrub code on future deploys will clean this up.
- if not hdisk.good_discovery(status, device_name):
- LOG.warning(
- "Detach Volume: The backing hdisk for volume "
- "%(volume_id)s on Virtual I/O Server %(vios)s is "
- "not in a valid state. This may be the result of "
- "an evacuate.",
- {'volume_id': self.volume_id, 'vios': vios_w.name},
- instance=self.instance)
- return False
-
- except Exception:
- LOG.exception(
- "Detach Volume: Failed to find disk on Virtual I/O "
- "Server %(vios_name)s for volume %(volume_id)s. Volume "
- "UDID: %(volume_uid)s.",
- {'vios_name': vios_w.name, 'volume_id': self.volume_id,
- 'volume_uid': udid, }, instance=self.instance)
- return False
-
- # We have found the device name
- LOG.info("Detach Volume: Discovered the device %(hdisk)s "
- "on Virtual I/O Server %(vios_name)s for volume "
- "%(volume_id)s. Volume UDID: %(volume_uid)s.",
- {'hdisk': device_name, 'vios_name': vios_w.name,
- 'volume_id': self.volume_id, 'volume_uid': udid},
- instance=self.instance)
-
- # Add the action to remove the mapping when the stg_ftsk is run.
- partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
- qprop='PartitionID')
-
- with lockutils.lock(self.volume_id):
- self._add_remove_mapping(partition_id, vios_w.uuid,
- device_name)
-
- # Add a step to also remove the hdisk
- self._add_remove_hdisk(vios_w, device_name)
-
- # Found a valid element to remove
- return True
-
- def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name):
- """Adds a subtask to remove the storage mapping.
-
- :param vm_uuid: The UUID of the VM instance
- :param vios_uuid: The UUID of the vios for the pypowervm adapter.
- :param device_name: The hdisk device name.
- """
- def rm_func(vios_w):
- LOG.info("Removing vSCSI mapping from physical volume %(dev)s "
- "on vios %(vios)s",
- {'dev': device_name, 'vios': vios_w.name},
- instance=self.instance)
- removed_maps = tsk_map.remove_maps(
- vios_w, vm_uuid,
- tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
- return removed_maps
- self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
-
- def _add_remove_hdisk(self, vio_wrap, device_name):
- """Adds a post-mapping task to remove the hdisk from the VIOS.
-
- This removal is only done after the mapping updates have completed.
-
- :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
- from.
- :param device_name: The hdisk name to remove.
- """
- def rm_hdisk():
- LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
- "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
- try:
- # Attempt to remove the hDisk
- hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
- vio_wrap.uuid)
- except Exception:
- # If there is a failure, log it, but don't stop the process
- LOG.exception("There was an error removing the hdisk "
- "%(disk)s from Virtual I/O Server %(vios)s.",
- {'disk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
-
-        # Check that the device does not have multiple mappings
- if not self._check_host_mappings(vio_wrap, device_name):
- name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
- self.stg_ftsk.add_post_execute(task.FunctorTask(
- rm_hdisk, name=name))
- else:
- LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
- "%(vios)s because it has existing storage mappings",
- {'disk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
-
- def _check_host_mappings(self, vios_wrap, device_name):
- """Checks if the given hdisk has multiple mappings
-
- :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
- from.
- :param device_name: The hdisk name to remove.
- :return: True if there are multiple instances using the given hdisk
- """
- vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
- if v.uuid == vios_wrap.uuid)
- mappings = tsk_map.find_maps(
- vios_scsi_mappings, None,
- tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
-
- LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
- "%(vios)s", {'num': len(mappings), 'dev': device_name,
- 'vios': vios_wrap.name}, instance=self.instance)
- # The mapping is still present as the task feed removes it later.
- return len(mappings) > 1
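
The guard in _check_host_mappings is what keeps shared LUNs alive: the
hdisk is only deleted from the VIOS when the detaching VM holds the sole
remaining mapping, and that VM's own mapping is still counted because the
feed removes it later. An illustrative restatement of the decision, under
that assumption:

    def should_remove_hdisk(mappings_for_device):
        # The detaching VM's mapping is still present in the feed, so a
        # count above one means another partition also uses the hdisk.
        return len(mappings_for_device) <= 1

    print(should_remove_hdisk(['this-vm']))              # True
    print(should_remove_hdisk(['this-vm', 'other-vm']))  # False
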
diff --git a/nova/virt/vmwareapi/constants.py b/nova/virt/vmwareapi/constants.py
index 6452434ce7..2a42174bf7 100644
--- a/nova/virt/vmwareapi/constants.py
+++ b/nova/virt/vmwareapi/constants.py
@@ -27,7 +27,8 @@ MIN_VC_OVS_VERSION = '5.5.0'
DISK_FORMAT_ISO = 'iso'
DISK_FORMAT_VMDK = 'vmdk'
DISK_FORMAT_ISCSI = 'iscsi'
-DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK]
+DISK_FORMAT_FCD = 'vstorageobject'
+DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK, DISK_FORMAT_FCD]
DISK_TYPE_THIN = 'thin'
CONTAINER_FORMAT_BARE = 'bare'
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 9934627e1e..aa728ea694 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -27,10 +27,8 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import versionutils as v_utils
-from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
-from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import power_state
@@ -47,6 +45,7 @@ from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
@@ -72,6 +71,7 @@ class VMwareVCDriver(driver.ComputeDriver):
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -87,13 +87,6 @@ class VMwareVCDriver(driver.ComputeDriver):
"supports_image_type_ploop": False,
}
- # Legacy nodename is of the form: <mo id>(<cluster name>)
- # e.g. domain-26(TestCluster)
- # We assume <mo id> consists of alphanumeric, _ and -.
- # We assume cluster name is everything between the first ( and the last ).
- # We pull out <mo id> for re-use.
- LEGACY_NODENAME = re.compile(r'([\w-]+)\(.+\)')
-
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
@@ -119,7 +112,7 @@ class VMwareVCDriver(driver.ComputeDriver):
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
- self._session = VMwareAPISession(scheme=scheme)
+ self._session = session.VMwareAPISession(scheme=scheme)
self._check_min_version()
@@ -525,6 +518,10 @@ class VMwareVCDriver(driver.ComputeDriver):
# where cpu traits are added. In the vmware world, this is where we
# would add nested providers representing tenant VDC and similar.
+ def prepare_for_spawn(self, instance):
+ """Perform pre-checks for spawn."""
+ self._vmops.prepare_for_spawn(instance)
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None, power_on=True, accel_info=None):
@@ -718,50 +715,3 @@ class VMwareVCDriver(driver.ComputeDriver):
def detach_interface(self, context, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(context, instance, vif)
-
-
-class VMwareAPISession(api.VMwareAPISession):
- """Sets up a session with the VC/ESX host and handles all
- the calls made to the host.
- """
-
- def __init__(self, host_ip=CONF.vmware.host_ip,
- host_port=CONF.vmware.host_port,
- username=CONF.vmware.host_username,
- password=CONF.vmware.host_password,
- retry_count=CONF.vmware.api_retry_count,
- scheme="https",
- cacert=CONF.vmware.ca_file,
- insecure=CONF.vmware.insecure,
- pool_size=CONF.vmware.connection_pool_size):
- super(VMwareAPISession, self).__init__(
- host=host_ip,
- port=host_port,
- server_username=username,
- server_password=password,
- api_retry_count=retry_count,
- task_poll_interval=CONF.vmware.task_poll_interval,
- scheme=scheme,
- create_session=True,
- cacert=cacert,
- insecure=insecure,
- pool_size=pool_size)
-
- def _is_vim_object(self, module):
- """Check if the module is a VIM Object instance."""
- return isinstance(module, vim.Vim)
-
- def _call_method(self, module, method, *args, **kwargs):
- """Calls a method within the module specified with
- args provided.
- """
- if not self._is_vim_object(module):
- return self.invoke_api(module, method, self.vim, *args, **kwargs)
- else:
- return self.invoke_api(module, method, *args, **kwargs)
-
- def _wait_for_task(self, task_ref):
- """Return a Deferred that will give the result of the given task.
- The task is polled until it completes.
- """
- return self.wait_for_task(task_ref)
diff --git a/nova/virt/vmwareapi/session.py b/nova/virt/vmwareapi/session.py
new file mode 100644
index 0000000000..973db5760f
--- /dev/null
+++ b/nova/virt/vmwareapi/session.py
@@ -0,0 +1,157 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import itertools
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_vmware import api
+from oslo_vmware import exceptions as vexc
+from oslo_vmware import vim
+from oslo_vmware.vim_util import get_moref_value
+
+import nova.conf
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+class StableMoRefProxy(metaclass=abc.ABCMeta):
+ """Abstract base class which acts as a proxy
+ for Managed-Object-References (MoRef).
+ Those references are usually "stable", meaning
+ they don't change over the life-time of the object.
+ But usually doesn't mean always. In that case, we
+ need to fetch the reference again via some search method,
+ which uses a guaranteed stable identifier (names, uuids, ...)
+ """
+
+ def __init__(self, ref):
+ self.moref = ref
+
+ @property
+ def __class__(self):
+ # Suds accesses the __class__.__name__ attribute
+ # of the object to determine the xml-tag of the object
+ # so we have to fake it
+ return self.moref.__class__
+
+ @abc.abstractmethod
+ def fetch_moref(self, session):
+ """Updates the moref field or raises
+ same exception the initial search would have
+ """
+
+ def __getattr__(self, name):
+ return getattr(self.moref, name)
+
+ def __repr__(self):
+ return "StableMoRefProxy({!r})".format(self.moref)
+
+
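
The __class__ property above matters because suds chooses the XML tag
for an argument from arg.__class__.__name__, so the proxy must report
the wrapped moref's class rather than its own. A small self-contained
demonstration of the trick (class names are illustrative):

    class Wrapped:
        pass

    class FakingProxy:
        def __init__(self, obj):
            self._obj = obj

        @property
        def __class__(self):
            # Attribute lookup finds this descriptor, so p.__class__
            # (and isinstance checks) report the wrapped type.
            return self._obj.__class__

    p = FakingProxy(Wrapped())
    print(p.__class__.__name__)  # Wrapped
    print(type(p).__name__)      # FakingProxy -- type() is not fooled
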
+class MoRef(StableMoRefProxy):
+ """MoRef takes a closure to resolve the reference of a managed object
+ That closure is called again, in case we get a ManagedObjectNotFound
+ exception on said reference.
+ """
+
+ def __init__(self, closure, ref=None):
+ self._closure = closure
+ ref = ref or self._closure()
+ super().__init__(ref)
+
+ def fetch_moref(self, _):
+ self.moref = self._closure()
+
+ def __repr__(self):
+ return "MoRef({!r})".format(self.moref)
+
+
+class VMwareAPISession(api.VMwareAPISession):
+ """Sets up a session with the VC/ESX host and handles all
+ the calls made to the host.
+ """
+
+ def __init__(self, host_ip=CONF.vmware.host_ip,
+ host_port=CONF.vmware.host_port,
+ username=CONF.vmware.host_username,
+ password=CONF.vmware.host_password,
+ retry_count=CONF.vmware.api_retry_count,
+ scheme="https",
+ cacert=CONF.vmware.ca_file,
+ insecure=CONF.vmware.insecure,
+ pool_size=CONF.vmware.connection_pool_size):
+ super(VMwareAPISession, self).__init__(
+ host=host_ip,
+ port=host_port,
+ server_username=username,
+ server_password=password,
+ api_retry_count=retry_count,
+ task_poll_interval=CONF.vmware.task_poll_interval,
+ scheme=scheme,
+ create_session=True,
+ cacert=cacert,
+ insecure=insecure,
+ pool_size=pool_size)
+
+ @staticmethod
+ def _is_vim_object(module):
+ """Check if the module is a VIM Object instance."""
+ return isinstance(module, vim.Vim)
+
+ def _call_method(self, module, method, *args, **kwargs):
+ """Calls a method within the module specified with
+ args provided.
+ """
+ try:
+ if not self._is_vim_object(module):
+ return self.invoke_api(module, method, self.vim, *args,
+ **kwargs)
+ return self.invoke_api(module, method, *args, **kwargs)
+ except vexc.ManagedObjectNotFoundException as monfe:
+ with excutils.save_and_reraise_exception() as ctxt:
+ moref = monfe.details.get("obj") if monfe.details else None
+ for arg in itertools.chain(args, kwargs.values()):
+ if not isinstance(arg, StableMoRefProxy):
+ continue
+ moref_arg = get_moref_value(arg.moref)
+ if moref != moref_arg:
+ continue
+ # We have found the argument with the moref
+ # causing the exception and we can try to recover it
+ arg.fetch_moref(self)
+ if not arg.moref:
+ # We didn't recover the reference
+ ctxt.reraise = True
+ break
+ moref_arg = get_moref_value(arg.moref)
+ if moref != moref_arg:
+ # We actually recovered, so do not raise `monfe`
+ LOG.info("Replaced moref %s with %s",
+ moref, moref_arg)
+ ctxt.reraise = False
+ # We only end up here when we have recovered a moref by changing
+ # the stored value of an argument to a different value,
+ # so let's try again (and recover again if it happens more than once)
+ return self._call_method(module, method, *args, **kwargs)
+
+ def _wait_for_task(self, task_ref):
+ """Return a Deferred that will give the result of the given task.
+ The task is polled until it completes.
+ """
+ return self.wait_for_task(task_ref)
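
The recovery loop in _call_method retries only when a StableMoRefProxy
argument both matches the moref named in the exception and re-resolves
to a different value; otherwise the original exception propagates. A
condensed, library-free sketch of the same control flow (the exception
and proxy here are illustrative stand-ins, not the oslo.vmware API):

    class StaleRefError(Exception):
        def __init__(self, ref):
            self.ref = ref

    class RefProxy:
        def __init__(self, ref, resolver):
            self.ref = ref
            self._resolver = resolver  # re-runs the original lookup

        def refresh(self):
            self.ref = self._resolver()

    def call(func, *args):
        try:
            return func(*args)
        except StaleRefError as err:
            for arg in args:
                if isinstance(arg, RefProxy) and arg.ref == err.ref:
                    arg.refresh()
                    if arg.ref and arg.ref != err.ref:
                        # A fresh reference was recovered: retry, and
                        # recover again if a later call goes stale too.
                        return call(func, *args)
            raise  # nothing recovered; propagate the failure
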
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 01a2e18c8d..7aaf5ca827 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,7 +20,6 @@ The VMware API VM utility module to build SOAP object specs.
import collections
import copy
-import functools
from oslo_log import log as logging
from oslo_service import loopingcall
@@ -37,6 +36,7 @@ from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
@@ -119,32 +119,16 @@ def vm_refs_cache_reset():
_VM_REFS_CACHE = {}
-def vm_ref_cache_delete(id):
- _VM_REFS_CACHE.pop(id, None)
+def vm_ref_cache_delete(id_):
+ _VM_REFS_CACHE.pop(id_, None)
-def vm_ref_cache_update(id, vm_ref):
- _VM_REFS_CACHE[id] = vm_ref
+def vm_ref_cache_update(id_, vm_ref):
+ _VM_REFS_CACHE[id_] = vm_ref
-def vm_ref_cache_get(id):
- return _VM_REFS_CACHE.get(id)
-
-
-def _vm_ref_cache(id, func, session, data):
- vm_ref = vm_ref_cache_get(id)
- if not vm_ref:
- vm_ref = func(session, data)
- vm_ref_cache_update(id, vm_ref)
- return vm_ref
-
-
-def vm_ref_cache_from_instance(func):
- @functools.wraps(func)
- def wrapper(session, instance):
- id = instance.uuid
- return _vm_ref_cache(id, func, session, instance)
- return wrapper
+def vm_ref_cache_get(id_):
+ return _VM_REFS_CACHE.get(id_)
# the config key which stores the VNC port
@@ -1131,15 +1115,25 @@ def _get_vm_ref_from_extraconfig(session, instance_uuid):
_get_object_for_optionvalue)
-@vm_ref_cache_from_instance
+class VmMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, ref, uuid):
+ super(VmMoRefProxy, self).__init__(ref)
+ self._uuid = uuid
+
+ def fetch_moref(self, session):
+ self.moref = search_vm_ref_by_identifier(session, self._uuid)
+ if not self.moref:
+ raise exception.InstanceNotFound(instance_id=self._uuid)
+ vm_ref_cache_update(self._uuid, self.moref)
+
+
def get_vm_ref(session, instance):
- """Get reference to the VM through uuid or vm name."""
- uuid = instance.uuid
- vm_ref = (search_vm_ref_by_identifier(session, uuid) or
- _get_vm_ref_from_name(session, instance.name))
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=uuid)
- return vm_ref
+ """Get reference to the VM through uuid."""
+ moref = vm_ref_cache_get(instance.uuid)
+ stable_ref = VmMoRefProxy(moref, instance.uuid)
+ if not moref:
+ stable_ref.fetch_moref(session)
+ return stable_ref
def search_vm_ref_by_identifier(session, identifier):
@@ -1151,8 +1145,7 @@ def search_vm_ref_by_identifier(session, identifier):
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
- _get_vm_ref_from_extraconfig(session, identifier) or
- _get_vm_ref_from_name(session, identifier))
+ _get_vm_ref_from_extraconfig(session, identifier))
return vm_ref
@@ -1536,8 +1529,8 @@ def find_rescue_device(hardware_devices, instance):
raise exception.NotFound(msg)
-def get_ephemeral_name(id):
- return 'ephemeral_%d.vmdk' % id
+def get_ephemeral_name(id_):
+ return 'ephemeral_%d.vmdk' % id_
def _detach_and_delete_devices_config_spec(client_factory, devices):
@@ -1619,11 +1612,11 @@ def folder_ref_cache_get(path):
return _FOLDER_PATH_REF_MAPPING.get(path)
-def _get_vm_name(display_name, id):
+def _get_vm_name(display_name, id_):
if display_name:
- return '%s (%s)' % (display_name[:41], id[:36])
- else:
- return id[:36]
+ return '%s (%s)' % (display_name[:41], id_[:36])
+
+ return id_[:36]
def rename_vm(session, vm_ref, instance):
@@ -1631,3 +1624,36 @@ def rename_vm(session, vm_ref, instance):
rename_task = session._call_method(session.vim, "Rename_Task", vm_ref,
newName=vm_name)
session._wait_for_task(rename_task)
+
+
+def _create_fcd_id_obj(client_factory, fcd_id):
+ id_obj = client_factory.create('ns0:ID')
+ id_obj.id = fcd_id
+ return id_obj
+
+
+def attach_fcd(
+ session, vm_ref, fcd_id, ds_ref_val, controller_key, unit_number
+ ):
+ client_factory = session.vim.client.factory
+ disk_id = _create_fcd_id_obj(client_factory, fcd_id)
+ ds_ref = vutil.get_moref(ds_ref_val, 'Datastore')
+ LOG.debug("Attaching fcd (id: %(fcd_id)s, datastore: %(ds_ref_val)s) to "
+ "vm: %(vm_ref)s.",
+ {'fcd_id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'vm_ref': vm_ref})
+ task = session._call_method(
+ session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
+ datastore=ds_ref, controllerKey=controller_key, unitNumber=unit_number)
+ session._wait_for_task(task)
+
+
+def detach_fcd(session, vm_ref, fcd_id):
+ client_factory = session.vim.client.factory
+ disk_id = _create_fcd_id_obj(client_factory, fcd_id)
+ LOG.debug("Detaching fcd (id: %(fcd_id)s) from vm: %(vm_ref)s.",
+ {'fcd_id': fcd_id, 'vm_ref': vm_ref})
+ task = session._call_method(
+ session.vim, "DetachDisk_Task", vm_ref, diskId=disk_id)
+ session._wait_for_task(task)
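
attach_fcd and detach_fcd are thin wrappers over the vSphere
AttachDisk_Task and DetachDisk_Task first-class-disk operations. A
hedged usage sketch, assuming `session` is an authenticated
VMwareAPISession and `instance` an in-scope nova instance; the
identifiers below are made up:

    from nova.virt.vmwareapi import vm_util

    fcd_id = '0c59f706-0000-0000-0000-000000000042'  # vStorageObject id
    ds_ref_val = 'datastore-42'                      # datastore moref value

    vm_ref = vm_util.get_vm_ref(session, instance)
    vm_util.attach_fcd(session, vm_ref, fcd_id, ds_ref_val,
                       controller_key=1000, unit_number=1)
    # ... detach later with the same id ...
    vm_util.detach_fcd(session, vm_ref, fcd_id)
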
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 1225581deb..7da453bdb1 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -263,11 +263,11 @@ class VMwareVMOps(object):
parent_folder = folder_ref
return folder_ref
- def _get_folder_name(self, name, id):
+ def _get_folder_name(self, name, id_):
# Maximum folder length must be less than 80 characters.
# The 'id' length is 36. The maximum prefix for name is 40.
# We cannot truncate the 'id' as this is unique across OpenStack.
- return '%s (%s)' % (name[:40], id[:36])
+ return '%s (%s)' % (name[:40], id_[:36])
def build_virtual_machine(self, instance, image_info,
dc_info, datastore, network_info, extra_specs,
@@ -729,6 +729,12 @@ class VMwareVMOps(object):
if new_size is not None:
vi.ii.file_size = new_size
+ def prepare_for_spawn(self, instance):
+ if (int(instance.flavor.memory_mb) % 4 != 0):
+ reason = _("Memory size is not multiple of 4")
+ raise exception.InstanceUnacceptable(instance_id=instance.uuid,
+ reason=reason)
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
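
The new prepare_for_spawn check fails fast on flavors whose memory size
is not a multiple of 4 MB, which vSphere requires for configured VM
memory. A tiny illustration of the guard:

    # Mirrors the modulo check in prepare_for_spawn with sample sizes.
    for memory_mb in (2048, 2050):
        print(memory_mb, 'ok' if memory_mb % 4 == 0 else 'rejected')
    # 2048 ok
    # 2050 rejected
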
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 613dc671c9..e1d60cc751 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -26,12 +26,30 @@ import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vm_util
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+class VolumeMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, connection_info_data):
+ volume_ref_value = connection_info_data.get('volume')
+ ref = None
+ if volume_ref_value:
+ ref = vutil.get_moref(volume_ref_value, 'VirtualMachine')
+ super(VolumeMoRefProxy, self).__init__(ref)
+ self._connection_info_data = connection_info_data
+
+ def fetch_moref(self, session):
+ volume_id = self._connection_info_data.get('volume_id')
+ if not volume_id:
+ volume_id = self._connection_info_data.get('name')
+ if volume_id:
+ self.moref = vm_util._get_vm_ref_from_vm_uuid(session, volume_id)
+
+
class VMwareVolumeOps(object):
"""Management class for Volume-related tasks."""
@@ -300,9 +318,10 @@ class VMwareVolumeOps(object):
connector['instance'] = vutil.get_moref_value(vm_ref)
return connector
- def _get_volume_ref(self, volume_ref_name):
- """Get the volume moref from the ref name."""
- return vutil.get_moref(volume_ref_name, 'VirtualMachine')
+ @staticmethod
+ def _get_volume_ref(connection_info_data):
+ """Get the volume moref from the "data" field in connection_info ."""
+ return VolumeMoRefProxy(connection_info_data)
def _get_vmdk_base_volume_device(self, volume_ref):
# Get the vmdk file name that the VM is pointing to
@@ -317,7 +336,7 @@ class VMwareVolumeOps(object):
LOG.debug("_attach_volume_vmdk: %s", connection_info,
instance=instance)
data = connection_info['data']
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
# Get details required for adding disk device such as
# adapter_type, disk_type
@@ -367,6 +386,53 @@ class VMwareVolumeOps(object):
device_name=device_name)
LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
+ def _get_controller_key_and_unit(self, vm_ref, adapter_type):
+ LOG.debug("_get_controller_key_and_unit vm: %(vm_ref)s, adapter: "
+ "%(adapter)s.",
+ {'vm_ref': vm_ref, 'adapter': adapter_type})
+ client_factory = self._session.vim.client.factory
+ devices = self._session._call_method(vutil,
+ "get_object_property",
+ vm_ref,
+ "config.hardware.device")
+ return vm_util.allocate_controller_key_and_unit_number(
+ client_factory, devices, adapter_type)
+
+ def _attach_fcd(self, vm_ref, adapter_type, fcd_id, ds_ref_val):
+ (controller_key, unit_number,
+ controller_spec) = self._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+
+ if controller_spec:
+            # No controller available to attach; create one first.
+ config_spec = self._session.vim.client.factory.create(
+ 'ns0:VirtualMachineConfigSpec')
+ config_spec.deviceChange = [controller_spec]
+ vm_util.reconfigure_vm(self._session, vm_ref, config_spec)
+ (controller_key, unit_number,
+ controller_spec) = self._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+
+ vm_util.attach_fcd(
+ self._session, vm_ref, fcd_id, ds_ref_val, controller_key,
+ unit_number)
+
+ def _attach_volume_fcd(self, connection_info, instance):
+ """Attach fcd volume storage to VM instance."""
+ LOG.debug("_attach_volume_fcd: %s", connection_info, instance=instance)
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ data = connection_info['data']
+ adapter_type = data['adapter_type']
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ state = vm_util.get_vm_state(self._session, instance)
+ if state != power_state.SHUTDOWN:
+ raise exception.Invalid(_('%s does not support disk '
+ 'hotplug.') % adapter_type)
+
+ self._attach_fcd(vm_ref, adapter_type, data['id'], data['ds_ref_val'])
+ LOG.debug("Attached fcd: %s", connection_info, instance=instance)
+
def attach_volume(self, connection_info, instance, adapter_type=None):
"""Attach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
@@ -376,6 +442,8 @@ class VMwareVolumeOps(object):
self._attach_volume_vmdk(connection_info, instance, adapter_type)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._attach_volume_iscsi(connection_info, instance, adapter_type)
+ elif driver_type == constants.DISK_FORMAT_FCD:
+ self._attach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
@@ -503,7 +571,7 @@ class VMwareVolumeOps(object):
LOG.debug("_detach_volume_vmdk: %s", connection_info,
instance=instance)
data = connection_info['data']
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
device = self._get_vmdk_backed_disk_device(vm_ref, data)
@@ -558,6 +626,20 @@ class VMwareVolumeOps(object):
self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True)
LOG.debug("Detached ISCSI: %s", connection_info, instance=instance)
+ def _detach_volume_fcd(self, connection_info, instance):
+ """Detach fcd volume storage to VM instance."""
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ data = connection_info['data']
+ adapter_type = data['adapter_type']
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ state = vm_util.get_vm_state(self._session, instance)
+ if state != power_state.SHUTDOWN:
+ raise exception.Invalid(_('%s does not support disk '
+ 'hotplug.') % adapter_type)
+
+ vm_util.detach_fcd(self._session, vm_ref, data['id'])
+
def detach_volume(self, connection_info, instance):
"""Detach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
@@ -567,6 +649,8 @@ class VMwareVolumeOps(object):
self._detach_volume_vmdk(connection_info, instance)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._detach_volume_iscsi(connection_info, instance)
+ elif driver_type == constants.DISK_FORMAT_FCD:
+ self._detach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
@@ -580,7 +664,7 @@ class VMwareVolumeOps(object):
vm_ref = vm_util.get_vm_ref(self._session, instance)
data = connection_info['data']
# Get the volume ref
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
# Pick the resource pool on which the instance resides. Move the
# volume to the datastore of the instance.
res_pool = self._get_res_pool_of_vm(vm_ref)
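
Taken together, volumeops now routes the 'vstorageobject' driver type to
_attach_volume_fcd, which allocates a controller key and unit number
first (reconfiguring the VM to add a controller if none is free) and
then calls vm_util.attach_fcd. A sketch of the connection_info such a
call consumes; the key names come from the code above, while the values
and the `volume_ops` instance are illustrative:

    connection_info = {
        'driver_volume_type': 'vstorageobject',  # constants.DISK_FORMAT_FCD
        'data': {
            'id': '0c59f706-0000-0000-0000-000000000042',  # FCD id
            'ds_ref_val': 'datastore-42',  # datastore holding the disk
            'adapter_type': 'lsiLogic',    # IDE refuses hot-plug attach
        },
    }
    volume_ops.attach_volume(connection_info, instance)
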
diff --git a/nova/virt/zvm/driver.py b/nova/virt/zvm/driver.py
index c3552ecc38..a1fa721515 100644
--- a/nova/virt/zvm/driver.py
+++ b/nova/virt/zvm/driver.py
@@ -46,6 +46,7 @@ class ZVMDriver(driver.ComputeDriver):
"""z/VM implementation of ComputeDriver."""
capabilities = {
"supports_pcpus": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
diff --git a/nova/virt/zvm/hypervisor.py b/nova/virt/zvm/hypervisor.py
index fb7cbc5a18..8a2c49d34b 100644
--- a/nova/virt/zvm/hypervisor.py
+++ b/nova/virt/zvm/hypervisor.py
@@ -131,7 +131,7 @@ class Hypervisor(object):
def guest_get_console_output(self, name):
"""get console out put of the given instance
- :returns: The output of the console of the instace, in string format.
+ :returns: The output of the console of the instance, in string format.
"""
return self._reqh.call('guest_get_console_output', name)