path: root/nova
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/metadata/base.py | 4
-rw-r--r--  nova/api/openstack/compute/views/servers.py | 32
-rw-r--r--  nova/cmd/common.py | 4
-rw-r--r--  nova/compute/api.py | 341
-rw-r--r--  nova/compute/claims.py | 29
-rw-r--r--  nova/compute/flavors.py | 11
-rw-r--r--  nova/compute/manager.py | 169
-rw-r--r--  nova/compute/resource_tracker.py | 111
-rw-r--r--  nova/compute/utils.py | 31
-rw-r--r--  nova/conductor/manager.py | 10
-rw-r--r--  nova/db/sqlalchemy/api.py | 22
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/402_train.py | 3
-rw-r--r--  nova/network/neutron.py | 4
-rw-r--r--  nova/notifications/base.py | 8
-rw-r--r--  nova/objects/image_meta.py | 8
-rw-r--r--  nova/objects/instance.py | 2
-rw-r--r--  nova/objects/migration.py | 2
-rw-r--r--  nova/objects/request_spec.py | 7
-rw-r--r--  nova/scheduler/filters/aggregate_instance_extra_specs.py | 27
-rw-r--r--  nova/scheduler/filters/compute_capabilities_filter.py | 16
-rw-r--r--  nova/scheduler/filters/type_filter.py | 10
-rw-r--r--  nova/scheduler/utils.py | 36
-rw-r--r--  nova/test.py | 6
-rw-r--r--  nova/tests/functional/regressions/test_bug_1806064.py | 4
-rw-r--r--  nova/tests/functional/regressions/test_bug_1843090.py | 7
-rw-r--r--  nova/tests/functional/regressions/test_bug_1845291.py | 7
-rw-r--r--  nova/tests/functional/regressions/test_bug_1928063.py | 12
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_disk_config.py | 25
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_evacuate.py | 4
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavor_access.py | 24
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavors.py | 4
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_migrate_server.py | 4
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_actions.py | 45
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_servers.py | 41
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py | 2
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_volumes.py | 6
-rw-r--r--  nova/tests/unit/api/openstack/fakes.py | 50
-rw-r--r--  nova/tests/unit/compute/test_api.py | 137
-rw-r--r--  nova/tests/unit/compute/test_claims.py | 30
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 315
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 116
-rw-r--r--  nova/tests/unit/compute/test_keypairs.py | 6
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 152
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py | 13
-rw-r--r--  nova/tests/unit/conductor/tasks/test_migrate.py | 4
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 20
-rw-r--r--  nova/tests/unit/db/fakes.py | 26
-rw-r--r--  nova/tests/unit/db/test_migration_utils.py | 35
-rw-r--r--  nova/tests/unit/fake_instance.py | 8
-rw-r--r--  nova/tests/unit/network/test_neutron.py | 2
-rw-r--r--  nova/tests/unit/objects/test_instance.py | 8
-rw-r--r--  nova/tests/unit/objects/test_objects.py | 5
-rw-r--r--  nova/tests/unit/objects/test_request_spec.py | 13
-rw-r--r--  nova/tests/unit/scheduler/test_scheduler_utils.py | 20
-rw-r--r--  nova/tests/unit/test_flavor_extra_specs.py (renamed from nova/tests/unit/test_instance_types_extra_specs.py) | 27
-rw-r--r--  nova/tests/unit/test_flavors.py | 56
-rw-r--r--  nova/tests/unit/test_notifications.py | 6
-rw-r--r--  nova/tests/unit/test_quota.py | 35
-rw-r--r--  nova/tests/unit/virt/ironic/test_driver.py | 47
-rw-r--r--  nova/tests/unit/virt/libvirt/test_blockinfo.py | 58
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 166
-rw-r--r--  nova/tests/unit/virt/libvirt/test_imagebackend.py | 8
-rw-r--r--  nova/tests/unit/virt/libvirt/test_machine_type_utils.py | 3
-rw-r--r--  nova/tests/unit/virt/libvirt/test_migration.py | 4
-rw-r--r--  nova/tests/unit/virt/libvirt/test_vif.py | 4
-rw-r--r--  nova/tests/unit/virt/test_configdrive.py | 5
-rw-r--r--  nova/tests/unit/virt/test_hardware.py | 8
-rw-r--r--  nova/tests/unit/virt/vmwareapi/test_driver_api.py | 43
-rw-r--r--  nova/utils.py | 8
-rw-r--r--  nova/virt/driver.py | 8
-rw-r--r--  nova/virt/fake.py | 5
-rw-r--r--  nova/virt/hardware.py | 4
-rw-r--r--  nova/virt/hyperv/driver.py | 5
-rw-r--r--  nova/virt/ironic/driver.py | 5
-rw-r--r--  nova/virt/vmwareapi/driver.py | 5
76 files changed, 1439 insertions, 1153 deletions
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 5c097e4525..aef1354ab3 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -279,8 +279,8 @@ class InstanceMetadata(object):
meta_data['public-ipv4'] = floating_ip
if self._check_version('2007-08-29', version):
- instance_type = self.instance.get_flavor()
- meta_data['instance-type'] = instance_type['name']
+ flavor = self.instance.get_flavor()
+ meta_data['instance-type'] = flavor['name']
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index 6c9f1006e6..52d864470b 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -547,34 +547,32 @@ class ViewBuilder(common.ViewBuilder):
else:
return ""
- def _get_flavor_dict(self, request, instance_type, show_extra_specs):
+ def _get_flavor_dict(self, request, flavor, show_extra_specs):
flavordict = {
- "vcpus": instance_type.vcpus,
- "ram": instance_type.memory_mb,
- "disk": instance_type.root_gb,
- "ephemeral": instance_type.ephemeral_gb,
- "swap": instance_type.swap,
- "original_name": instance_type.name
+ "vcpus": flavor.vcpus,
+ "ram": flavor.memory_mb,
+ "disk": flavor.root_gb,
+ "ephemeral": flavor.ephemeral_gb,
+ "swap": flavor.swap,
+ "original_name": flavor.name
}
if show_extra_specs:
- flavordict['extra_specs'] = instance_type.extra_specs
+ flavordict['extra_specs'] = flavor.extra_specs
return flavordict
def _get_flavor(self, request, instance, show_extra_specs):
- instance_type = instance.get_flavor()
- if not instance_type:
- LOG.warning("Instance has had its instance_type removed "
+ flavor = instance.get_flavor()
+ if not flavor:
+ LOG.warning("Instance has had its flavor removed "
"from the DB", instance=instance)
return {}
if api_version_request.is_supported(request, min_version="2.47"):
- return self._get_flavor_dict(request, instance_type,
- show_extra_specs)
+ return self._get_flavor_dict(request, flavor, show_extra_specs)
- flavor_id = instance_type["flavorid"]
- flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
- flavor_id,
- "flavors")
+ flavor_id = flavor["flavorid"]
+ flavor_bookmark = self._flavor_builder._get_bookmark_link(
+ request, flavor_id, "flavors")
return {
"id": str(flavor_id),
"links": [{
diff --git a/nova/cmd/common.py b/nova/cmd/common.py
index c3f62fa57e..1d4d5b4612 100644
--- a/nova/cmd/common.py
+++ b/nova/cmd/common.py
@@ -18,6 +18,7 @@
"""
import argparse
+import inspect
import traceback
from oslo_log import log as logging
@@ -26,7 +27,6 @@ import nova.conf
import nova.db.api
from nova import exception
from nova.i18n import _
-from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -65,7 +65,7 @@ def validate_args(fn, *args, **kwargs):
:param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
- argspec = utils.getargspec(fn)
+ argspec = inspect.getfullargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
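Replacing the removed nova.utils.getargspec wrapper with inspect.getfullargspec is behavior-preserving here because only .args and .defaults are consulted: the required arguments are the leading slice of args not covered by defaults. A minimal, self-contained sketch of that arithmetic:

    import inspect

    def required_positional_args(fn):
        # Arguments without defaults occupy the front of argspec.args.
        argspec = inspect.getfullargspec(fn)
        num_defaults = len(argspec.defaults or [])
        return argspec.args[:len(argspec.args) - num_defaults]

    def example(host, binary, topic=None):
        pass

    assert required_positional_args(example) == ['host', 'binary']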
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4151bf8a8b..87afce10d5 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -583,8 +583,9 @@ class API(base.Base):
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
- def _validate_flavor_image(self, context, image_id, image,
- instance_type, root_bdm, validate_numa=True):
+ def _validate_flavor_image(
+ self, context, image_id, image, flavor, root_bdm, validate_numa=True,
+ ):
"""Validate the flavor and image.
This is called from the API service to ensure that the flavor
@@ -594,20 +595,20 @@ class API(base.Base):
:param context: A context.RequestContext
:param image_id: UUID of the image
:param image: a dict representation of the image including properties,
- enforces the image status is active.
- :param instance_type: Flavor object
+ enforces the image status is active.
+ :param flavor: Flavor object
:param root_bdm: BlockDeviceMapping for root disk. Will be None for
- the resize case.
+ the resize case.
:param validate_numa: Flag to indicate whether or not to validate
- the NUMA-related metadata.
- :raises: Many different possible exceptions. See
- api.openstack.compute.servers.INVALID_FLAVOR_IMAGE_EXCEPTIONS
- for the full list.
+ the NUMA-related metadata.
+ :raises: Many different possible exceptions. See
+ api.openstack.compute.servers.INVALID_FLAVOR_IMAGE_EXCEPTIONS
+ for the full list.
"""
if image and image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
- self._validate_flavor_image_nostatus(context, image, instance_type,
- root_bdm, validate_numa)
+ self._validate_flavor_image_nostatus(
+ context, image, flavor, root_bdm, validate_numa)
@staticmethod
def _detect_nonbootable_image_from_properties(image_id, image):
@@ -640,9 +641,10 @@ class API(base.Base):
reason=reason)
@staticmethod
- def _validate_flavor_image_nostatus(context, image, instance_type,
- root_bdm, validate_numa=True,
- validate_pci=False):
+ def _validate_flavor_image_nostatus(
+ context, image, flavor, root_bdm, validate_numa=True,
+ validate_pci=False,
+ ):
"""Validate the flavor and image.
This is called from the API service to ensure that the flavor
@@ -651,7 +653,7 @@ class API(base.Base):
:param context: A context.RequestContext
:param image: a dict representation of the image including properties
- :param instance_type: Flavor object
+ :param flavor: Flavor object
:param root_bdm: BlockDeviceMapping for root disk. Will be None for
the resize case.
:param validate_numa: Flag to indicate whether or not to validate
@@ -672,7 +674,7 @@ class API(base.Base):
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
- if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
+ if flavor['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# Image min_disk is in gb, size is in bytes. For sanity, have them both
@@ -728,7 +730,7 @@ class API(base.Base):
# Target disk is a local disk whose size is taken from the flavor
else:
- dest_size = instance_type['root_gb'] * units.Gi
+ dest_size = flavor['root_gb'] * units.Gi
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
@@ -752,7 +754,7 @@ class API(base.Base):
raise exception.BootFromVolumeRequiredForZeroDiskFlavor()
API._validate_flavor_image_numa_pci(
- image, instance_type, validate_numa=validate_numa,
+ image, flavor, validate_numa=validate_numa,
validate_pci=validate_pci)
# TODO(huaqiang): Remove in Wallaby when there is no nova-compute node
@@ -778,9 +780,9 @@ class API(base.Base):
raise exception.MixedInstanceNotSupportByComputeService()
@staticmethod
- def _validate_flavor_image_numa_pci(image, instance_type,
- validate_numa=True,
- validate_pci=False):
+ def _validate_flavor_image_numa_pci(
+ image, flavor, validate_numa=True, validate_pci=False,
+ ):
"""Validate the flavor and image NUMA/PCI values.
This is called from the API service to ensure that the flavor
@@ -788,7 +790,7 @@ class API(base.Base):
with each other.
:param image: a dict representation of the image including properties
- :param instance_type: Flavor object
+ :param flavor: Flavor object
:param validate_numa: Flag to indicate whether or not to validate
the NUMA-related metadata.
:param validate_pci: Flag to indicate whether or not to validate
@@ -799,10 +801,10 @@ class API(base.Base):
"""
image_meta = _get_image_meta_obj(image)
- API._validate_flavor_image_mem_encryption(instance_type, image_meta)
+ API._validate_flavor_image_mem_encryption(flavor, image_meta)
# validate PMU extra spec and image metadata
- flavor_pmu = instance_type.extra_specs.get('hw:pmu')
+ flavor_pmu = flavor.extra_specs.get('hw:pmu')
image_pmu = image_meta.properties.get('hw_pmu')
if (flavor_pmu is not None and image_pmu is not None and
image_pmu != strutils.bool_from_string(flavor_pmu)):
@@ -810,29 +812,28 @@ class API(base.Base):
# Only validate values of flavor/image so the return results of
# following 'get' functions are not used.
- hardware.get_number_of_serial_ports(instance_type, image_meta)
- hardware.get_realtime_cpu_constraint(instance_type, image_meta)
- hardware.get_cpu_topology_constraints(instance_type, image_meta)
+ hardware.get_number_of_serial_ports(flavor, image_meta)
+ hardware.get_realtime_cpu_constraint(flavor, image_meta)
+ hardware.get_cpu_topology_constraints(flavor, image_meta)
if validate_numa:
- hardware.numa_get_constraints(instance_type, image_meta)
+ hardware.numa_get_constraints(flavor, image_meta)
if validate_pci:
- pci_request.get_pci_requests_from_flavor(instance_type)
+ pci_request.get_pci_requests_from_flavor(flavor)
@staticmethod
- def _validate_flavor_image_mem_encryption(instance_type, image):
+ def _validate_flavor_image_mem_encryption(flavor, image):
"""Validate that the flavor and image don't make contradictory
requests regarding memory encryption.
- :param instance_type: Flavor object
+ :param flavor: Flavor object
:param image: an ImageMeta object
:raises: nova.exception.FlavorImageConflict
"""
# This library function will raise the exception for us if
# necessary; if not, we can ignore the result returned.
- hardware.get_mem_encryption_constraint(instance_type, image)
+ hardware.get_mem_encryption_constraint(flavor, image)
- def _get_image_defined_bdms(self, instance_type, image_meta,
- root_device_name):
+ def _get_image_defined_bdms(self, flavor, image_meta, root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
@@ -849,14 +850,13 @@ class API(base.Base):
image_defined_bdms))
if image_mapping:
- image_mapping = self._prepare_image_mapping(instance_type,
- image_mapping)
+ image_mapping = self._prepare_image_mapping(flavor, image_mapping)
image_defined_bdms = self._merge_bdms_lists(
image_mapping, image_defined_bdms)
return image_defined_bdms
- def _get_flavor_defined_bdms(self, instance_type, block_device_mapping):
+ def _get_flavor_defined_bdms(self, flavor, block_device_mapping):
flavor_defined_bdms = []
have_ephemeral_bdms = any(filter(
@@ -864,12 +864,12 @@ class API(base.Base):
have_swap_bdms = any(filter(
block_device.new_format_is_swap, block_device_mapping))
- if instance_type.get('ephemeral_gb') and not have_ephemeral_bdms:
+ if flavor.get('ephemeral_gb') and not have_ephemeral_bdms:
flavor_defined_bdms.append(
- block_device.create_blank_bdm(instance_type['ephemeral_gb']))
- if instance_type.get('swap') and not have_swap_bdms:
+ block_device.create_blank_bdm(flavor['ephemeral_gb']))
+ if flavor.get('swap') and not have_swap_bdms:
flavor_defined_bdms.append(
- block_device.create_blank_bdm(instance_type['swap'], 'swap'))
+ block_device.create_blank_bdm(flavor['swap'], 'swap'))
return flavor_defined_bdms
@@ -887,9 +887,10 @@ class API(base.Base):
[bdm for bdm in overridable_mappings
if bdm['device_name'] not in device_names])
- def _check_and_transform_bdm(self, context, base_options, instance_type,
- image_meta, min_count, max_count,
- block_device_mapping, legacy_bdm):
+ def _check_and_transform_bdm(
+ self, context, base_options, flavor, image_meta, min_count, max_count,
+ block_device_mapping, legacy_bdm,
+ ):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
@@ -906,7 +907,7 @@ class API(base.Base):
raise exception.InvalidRequest(msg)
image_defined_bdms = self._get_image_defined_bdms(
- instance_type, image_meta, root_device_name)
+ flavor, image_meta, root_device_name)
root_in_image_bdms = (
block_device.get_root_bdm(image_defined_bdms) is not None)
@@ -942,7 +943,7 @@ class API(base.Base):
raise exception.InvalidRequest(msg)
block_device_mapping += self._get_flavor_defined_bdms(
- instance_type, block_device_mapping)
+ flavor, block_device_mapping)
return block_device_obj.block_device_make_list_from_dicts(
context, block_device_mapping)
@@ -954,33 +955,30 @@ class API(base.Base):
image = self.image_api.get(context, image_href)
return image['id'], image
- def _checks_for_create_and_rebuild(self, context, image_id, image,
- instance_type, metadata,
- files_to_inject, root_bdm,
- validate_numa=True):
+ def _checks_for_create_and_rebuild(
+ self, context, image_id, image, flavor, metadata, files_to_inject,
+ root_bdm, validate_numa=True,
+ ):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
self._detect_nonbootable_image_from_properties(image_id, image)
self._validate_flavor_image(context, image_id, image,
- instance_type, root_bdm,
+ flavor, root_bdm,
validate_numa=validate_numa)
- def _validate_and_build_base_options(self, context, instance_type,
- boot_meta, image_href, image_id,
- kernel_id, ramdisk_id, display_name,
- display_description, key_name,
- key_data, security_groups,
- availability_zone, user_data,
- metadata, access_ip_v4, access_ip_v6,
- requested_networks, config_drive,
- auto_disk_config, reservation_id,
- max_count,
- supports_port_resource_request):
+ def _validate_and_build_base_options(
+ self, context, flavor, boot_meta, image_href, image_id, kernel_id,
+ ramdisk_id, display_name, display_description, key_name,
+ key_data, security_groups, availability_zone, user_data, metadata,
+ access_ip_v4, access_ip_v6, requested_networks, config_drive,
+ auto_disk_config, reservation_id, max_count,
+ supports_port_resource_request,
+ ):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
- if instance_type['disabled']:
- raise exception.FlavorNotFound(flavor_id=instance_type['id'])
+ if flavor['disabled']:
+ raise exception.FlavorNotFound(flavor_id=flavor['id'])
if user_data:
try:
@@ -1017,13 +1015,12 @@ class API(base.Base):
boot_meta.get('properties', {})))
image_meta = _get_image_meta_obj(boot_meta)
- numa_topology = hardware.numa_get_constraints(
- instance_type, image_meta)
+ numa_topology = hardware.numa_get_constraints(flavor, image_meta)
system_metadata = {}
pci_numa_affinity_policy = hardware.get_pci_numa_policy_constraint(
- instance_type, image_meta)
+ flavor, image_meta)
# PCI requests come from two sources: instance flavor and
# requested_networks. The first call in below returns an
@@ -1032,7 +1029,7 @@ class API(base.Base):
# object for each SR-IOV port, and append it to the list in the
# InstancePCIRequests object
pci_request_info = pci_request.get_pci_requests_from_flavor(
- instance_type, affinity_policy=pci_numa_affinity_policy)
+ flavor, affinity_policy=pci_numa_affinity_policy)
result = self.network_api.create_resource_requests(
context, requested_networks, pci_request_info,
affinity_policy=pci_numa_affinity_policy)
@@ -1054,11 +1051,11 @@ class API(base.Base):
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
- 'instance_type_id': instance_type['id'],
- 'memory_mb': instance_type['memory_mb'],
- 'vcpus': instance_type['vcpus'],
- 'root_gb': instance_type['root_gb'],
- 'ephemeral_gb': instance_type['ephemeral_gb'],
+ 'instance_type_id': flavor['id'],
+ 'memory_mb': flavor['memory_mb'],
+ 'vcpus': flavor['vcpus'],
+ 'root_gb': flavor['root_gb'],
+ 'ephemeral_gb': flavor['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description,
'user_data': user_data,
@@ -1240,13 +1237,15 @@ class API(base.Base):
'instance_az': instance_az, 'volume_az': volume_az}
raise exception.MismatchVolumeAZException(reason=msg)
- def _provision_instances(self, context, instance_type, min_count,
- max_count, base_options, boot_meta, security_groups,
- block_device_mapping, shutdown_terminate,
- instance_group, check_server_group_quota, filter_properties,
- key_pair, tags, trusted_certs, supports_multiattach,
- network_metadata=None, requested_host=None,
- requested_hypervisor_hostname=None):
+ def _provision_instances(
+ self, context, flavor, min_count,
+ max_count, base_options, boot_meta, security_groups,
+ block_device_mapping, shutdown_terminate,
+ instance_group, check_server_group_quota, filter_properties,
+ key_pair, tags, trusted_certs, supports_multiattach,
+ network_metadata=None, requested_host=None,
+ requested_hypervisor_hostname=None,
+ ):
# NOTE(boxiang): Check whether compute nodes exist by validating
# the host and/or the hypervisor_hostname. Pass the destination
# to the scheduler with host and/or hypervisor_hostname(node).
@@ -1260,9 +1259,9 @@ class API(base.Base):
destination.node = requested_hypervisor_hostname
# Check quotas
num_instances = compute_utils.check_num_instances_quota(
- context, instance_type, min_count, max_count)
+ context, flavor, min_count, max_count)
security_groups = security_group_api.populate_security_groups(
- security_groups)
+ security_groups)
port_resource_requests = base_options.pop('port_resource_requests')
instances_to_build = []
# We could be iterating over several instances with several BDMs per
@@ -1281,20 +1280,20 @@ class API(base.Base):
# base_options to match the volume zone.
base_options['availability_zone'] = volume_az
LOG.debug("Going to run %s instances...", num_instances)
- extra_specs = instance_type.extra_specs
+ extra_specs = flavor.extra_specs
dp_name = extra_specs.get('accel:device_profile')
dp_request_groups = []
if dp_name:
dp_request_groups = cyborg.get_device_profile_request_groups(
context, dp_name)
try:
- for i in range(num_instances):
+ for idx in range(num_instances):
# Create a uuid for the instance so we can store the
# RequestSpec before the instance is created.
instance_uuid = uuidutils.generate_uuid()
# Store the RequestSpec that will be used for scheduling.
req_spec = objects.RequestSpec.from_components(context,
- instance_uuid, boot_meta, instance_type,
+ instance_uuid, boot_meta, flavor,
base_options['numa_topology'],
base_options['pci_requests'], filter_properties,
instance_group, base_options['availability_zone'],
@@ -1337,13 +1336,13 @@ class API(base.Base):
context, trusted_certs)
self._populate_instance_for_create(
- context, instance, boot_meta, i,
- security_groups, instance_type,
+ context, instance, boot_meta, idx,
+ security_groups, flavor,
num_instances, shutdown_terminate)
block_device_mapping = (
self._bdm_validate_set_size_and_instance(context,
- instance, instance_type, block_device_mapping,
+ instance, flavor, block_device_mapping,
image_cache, volumes, supports_multiattach))
instance_tags = self._transform_tags(tags, instance.uuid)
@@ -1460,7 +1459,7 @@ class API(base.Base):
return objects.InstanceGroup.get_by_uuid(context, group_hint)
- def _create_instance(self, context, instance_type,
+ def _create_instance(self, context, flavor,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
@@ -1507,14 +1506,17 @@ class API(base.Base):
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
- base_options, max_net_count, key_pair, security_groups, \
- network_metadata = self._validate_and_build_base_options(
- context, instance_type, boot_meta, image_href, image_id,
- kernel_id, ramdisk_id, display_name, display_description,
- key_name, key_data, security_groups, availability_zone,
- user_data, metadata, access_ip_v4, access_ip_v6,
- requested_networks, config_drive, auto_disk_config,
- reservation_id, max_count, supports_port_resource_request)
+ (
+ base_options, max_net_count, key_pair, security_groups,
+ network_metadata,
+ ) = self._validate_and_build_base_options(
+ context, flavor, boot_meta, image_href, image_id,
+ kernel_id, ramdisk_id, display_name, display_description,
+ key_name, key_data, security_groups, availability_zone,
+ user_data, metadata, access_ip_v4, access_ip_v6,
+ requested_networks, config_drive, auto_disk_config,
+ reservation_id, max_count, supports_port_resource_request,
+ )
# TODO(huaqiang): Remove in Wallaby
# check nova-compute nodes have been updated to Victoria to support the
@@ -1535,7 +1537,7 @@ class API(base.Base):
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(context,
- base_options, instance_type, boot_meta, min_count, max_count,
+ base_options, flavor, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
# We can't do this check earlier because we need bdms from all sources
@@ -1543,7 +1545,7 @@ class API(base.Base):
# Set validate_numa=False since numa validation is already done by
# _validate_and_build_base_options().
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
- instance_type, metadata, injected_files,
+ flavor, metadata, injected_files,
block_device_mapping.root_bdm(), validate_numa=False)
instance_group = self._get_requested_instance_group(context,
@@ -1552,7 +1554,7 @@ class API(base.Base):
tags = self._create_tag_list_obj(context, tags)
instances_to_build = self._provision_instances(
- context, instance_type, min_count, max_count, base_options,
+ context, flavor, min_count, max_count, base_options,
boot_meta, security_groups, block_device_mapping,
shutdown_terminate, instance_group, check_server_group_quota,
filter_properties, key_pair, tags, trusted_certs,
@@ -1611,18 +1613,18 @@ class API(base.Base):
pass
@staticmethod
- def _volume_size(instance_type, bdm):
+ def _volume_size(flavor, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
- size = instance_type.get('swap', 0)
+ size = flavor.get('swap', 0)
else:
- size = instance_type.get('ephemeral_gb', 0)
+ size = flavor.get('ephemeral_gb', 0)
return size
- def _prepare_image_mapping(self, instance_type, mappings):
+ def _prepare_image_mapping(self, flavor, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
@@ -1653,7 +1655,7 @@ class API(base.Base):
'boot_index': -1})
values['volume_size'] = self._volume_size(
- instance_type, values)
+ flavor, values)
if values['volume_size'] == 0:
continue
@@ -1662,7 +1664,7 @@ class API(base.Base):
return prepared_mappings
def _bdm_validate_set_size_and_instance(self, context, instance,
- instance_type,
+ flavor,
block_device_mapping,
image_cache, volumes,
supports_multiattach=False):
@@ -1673,7 +1675,7 @@ class API(base.Base):
:param context: nova auth RequestContext
:param instance: Instance object
- :param instance_type: Flavor object - used for swap and ephemeral BDMs
+ :param flavor: Flavor object - used for swap and ephemeral BDMs
:param block_device_mapping: BlockDeviceMappingList object
:param image_cache: dict of image dicts keyed by id which is used as a
cache in case there are multiple BDMs in the same request using
@@ -1685,11 +1687,11 @@ class API(base.Base):
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance.uuid)
self._validate_bdm(
- context, instance, instance_type, block_device_mapping,
+ context, instance, flavor, block_device_mapping,
image_cache, volumes, supports_multiattach)
instance_block_device_mapping = block_device_mapping.obj_clone()
for bdm in instance_block_device_mapping:
- bdm.volume_size = self._volume_size(instance_type, bdm)
+ bdm.volume_size = self._volume_size(flavor, bdm)
bdm.instance_uuid = instance.uuid
return instance_block_device_mapping
@@ -1713,14 +1715,15 @@ class API(base.Base):
raise exception.VolumeTypeNotFound(
id_or_name=volume_type_id_or_name)
- def _validate_bdm(self, context, instance, instance_type,
- block_device_mappings, image_cache, volumes,
- supports_multiattach=False):
+ def _validate_bdm(
+ self, context, instance, flavor, block_device_mappings, image_cache,
+ volumes, supports_multiattach=False,
+ ):
"""Validate requested block device mappings.
:param context: nova auth RequestContext
:param instance: Instance object
- :param instance_type: Flavor object - used for swap and ephemeral BDMs
+ :param flavor: Flavor object - used for swap and ephemeral BDMs
:param block_device_mappings: BlockDeviceMappingList object
:param image_cache: dict of image dicts keyed by id which is used as a
cache in case there are multiple BDMs in the same request using
@@ -1824,10 +1827,10 @@ class API(base.Base):
if disk_bus and disk_bus not in fields_obj.DiskBus.ALL:
raise exception.InvalidBDMDiskBus(disk_bus=disk_bus)
- ephemeral_size = sum(bdm.volume_size or instance_type['ephemeral_gb']
+ ephemeral_size = sum(bdm.volume_size or flavor['ephemeral_gb']
for bdm in block_device_mappings
if block_device.new_format_is_ephemeral(bdm))
- if ephemeral_size > instance_type['ephemeral_gb']:
+ if ephemeral_size > flavor['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
@@ -1838,7 +1841,7 @@ class API(base.Base):
if swap_list:
swap_size = swap_list[0].volume_size or 0
- if swap_size > instance_type['swap']:
+ if swap_size > flavor['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
@@ -1881,9 +1884,10 @@ class API(base.Base):
instance.display_name = new_display_name
- def _populate_instance_for_create(self, context, instance, image,
- index, security_groups, instance_type,
- num_instances, shutdown_terminate):
+ def _populate_instance_for_create(
+ self, context, instance, image, index, security_groups, flavor,
+ num_instances, shutdown_terminate,
+ ):
"""Build the beginning of a new instance."""
instance.launch_index = index
@@ -1893,7 +1897,7 @@ class API(base.Base):
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
- instance.flavor = instance_type
+ instance.flavor = flavor
instance.old_flavor = None
instance.new_flavor = None
if CONF.ephemeral_storage_encryption.enabled:
@@ -1918,7 +1922,7 @@ class API(base.Base):
instance.system_metadata = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
- image, instance_type)
+ image, flavor)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance.image_ref)
@@ -1965,8 +1969,7 @@ class API(base.Base):
tag.resource_id = resource_id
return instance_tags
- def _check_multiple_instances_with_neutron_ports(self,
- requested_networks):
+ def _check_multiple_instances_with_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
@@ -1984,21 +1987,23 @@ class API(base.Base):
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
- def create(self, context, instance_type,
- image_href, kernel_id=None, ramdisk_id=None,
- min_count=None, max_count=None,
- display_name=None, display_description=None,
- key_name=None, key_data=None, security_groups=None,
- availability_zone=None, forced_host=None, forced_node=None,
- user_data=None, metadata=None, injected_files=None,
- admin_password=None, block_device_mapping=None,
- access_ip_v4=None, access_ip_v6=None, requested_networks=None,
- config_drive=None, auto_disk_config=None, scheduler_hints=None,
- legacy_bdm=True, shutdown_terminate=False,
- check_server_group_quota=False, tags=None,
- supports_multiattach=False, trusted_certs=None,
- supports_port_resource_request=False,
- requested_host=None, requested_hypervisor_hostname=None):
+ def create(
+ self, context, flavor,
+ image_href, kernel_id=None, ramdisk_id=None,
+ min_count=None, max_count=None,
+ display_name=None, display_description=None,
+ key_name=None, key_data=None, security_groups=None,
+ availability_zone=None, forced_host=None, forced_node=None,
+ user_data=None, metadata=None, injected_files=None,
+ admin_password=None, block_device_mapping=None,
+ access_ip_v4=None, access_ip_v6=None, requested_networks=None,
+ config_drive=None, auto_disk_config=None, scheduler_hints=None,
+ legacy_bdm=True, shutdown_terminate=False,
+ check_server_group_quota=False, tags=None,
+ supports_multiattach=False, trusted_certs=None,
+ supports_port_resource_request=False,
+ requested_host=None, requested_hypervisor_hostname=None,
+ ):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
@@ -2021,10 +2026,10 @@ class API(base.Base):
raise exception.InvalidRequest(msg)
filter_properties = scheduler_utils.build_filter_properties(
- scheduler_hints, forced_host, forced_node, instance_type)
+ scheduler_hints, forced_host, forced_node, flavor)
return self._create_instance(
- context, instance_type,
+ context, flavor,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
@@ -3972,14 +3977,14 @@ class API(base.Base):
self._check_auto_disk_config(
instance, auto_disk_config=auto_disk_config)
- current_instance_type = instance.get_flavor()
+ current_flavor = instance.get_flavor()
# NOTE(aarents): Ensure image_base_image_ref is present as it will be
# needed during finish_resize/cross_cell_resize. Instances upgraded
# from an older nova release may not have this property because of
# a rebuild bug Bug/1893618.
instance.system_metadata.update(
- {'image_base_image_ref': instance.image_ref}
+ {'image_base_image_ref': instance.image_ref}
)
# If flavor_id is not provided, only migrate the instance.
@@ -3987,52 +3992,51 @@ class API(base.Base):
if not flavor_id:
LOG.debug("flavor_id is None. Assuming migration.",
instance=instance)
- new_instance_type = current_instance_type
+ new_flavor = current_flavor
else:
- new_instance_type = flavors.get_flavor_by_flavor_id(
- flavor_id, read_deleted="no")
+ new_flavor = flavors.get_flavor_by_flavor_id(
+ flavor_id, read_deleted="no")
# NOTE(wenping): We use this instead of the 'block_accelerator'
# decorator since the operation can differ depending on args,
# and for resize we have two flavors to worry about, we should
# reject resize with new flavor with accelerator.
- if new_instance_type.extra_specs.get('accel:device_profile'):
+ if new_flavor.extra_specs.get('accel:device_profile'):
raise exception.ForbiddenWithAccelerators()
# Check to see if we're resizing to a zero-disk flavor which is
# only supported with volume-backed servers.
- if (new_instance_type.get('root_gb') == 0 and
- current_instance_type.get('root_gb') != 0):
+ if (new_flavor.get('root_gb') == 0 and
+ current_flavor.get('root_gb') != 0):
volume_backed = compute_utils.is_volume_backed_instance(
context, instance)
if not volume_backed:
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
- current_instance_type_name = current_instance_type['name']
- new_instance_type_name = new_instance_type['name']
- LOG.debug("Old instance type %(current_instance_type_name)s, "
- "new instance type %(new_instance_type_name)s",
- {'current_instance_type_name': current_instance_type_name,
- 'new_instance_type_name': new_instance_type_name},
+ current_flavor_name = current_flavor['name']
+ new_flavor_name = new_flavor['name']
+ LOG.debug("Old instance type %(current_flavor_name)s, "
+ "new instance type %(new_flavor_name)s",
+ {'current_flavor_name': current_flavor_name,
+ 'new_flavor_name': new_flavor_name},
instance=instance)
- same_instance_type = (current_instance_type['id'] ==
- new_instance_type['id'])
+ same_flavor = current_flavor['id'] == new_flavor['id']
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
- if not same_instance_type and new_instance_type.get('disabled'):
+ if not same_flavor and new_flavor.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
- if same_instance_type and flavor_id:
+ if same_flavor and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
if flavor_id:
self._check_quota_for_upsize(context, instance,
- current_instance_type,
- new_instance_type)
+ current_flavor,
+ new_flavor)
- if not same_instance_type:
+ if not same_flavor:
image = utils.get_image_from_system_metadata(
instance.system_metadata)
# Figure out if the instance is volume-backed but only if we didn't
@@ -4047,14 +4051,14 @@ class API(base.Base):
# resize case.
if volume_backed:
self._validate_flavor_image_numa_pci(
- image, new_instance_type, validate_pci=True)
+ image, new_flavor, validate_pci=True)
else:
self._validate_flavor_image_nostatus(
- context, image, new_instance_type, root_bdm=None,
+ context, image, new_flavor, root_bdm=None,
validate_pci=True)
filter_properties = {'ignore_hosts': []}
- if not self._allow_resize_to_same_host(same_instance_type, instance):
+ if not self._allow_resize_to_same_host(same_flavor, instance):
filter_properties['ignore_hosts'].append(instance.host)
request_spec = objects.RequestSpec.get_by_instance_uuid(
@@ -4062,9 +4066,9 @@ class API(base.Base):
request_spec.ignore_hosts = filter_properties['ignore_hosts']
# don't recalculate the NUMA topology unless the flavor has changed
- if not same_instance_type:
+ if not same_flavor:
request_spec.numa_topology = hardware.numa_get_constraints(
- new_instance_type, instance.image_meta)
+ new_flavor, instance.image_meta)
# TODO(huaqiang): Remove in Wallaby
# check nova-compute nodes have been updated to Victoria to resize
# instance to a new mixed instance from a dedicated or shared
@@ -4107,9 +4111,10 @@ class API(base.Base):
# Asynchronously RPC cast to conductor so the response is not blocked
# during scheduling. If something fails the user can find out via
# instance actions.
- self.compute_task_api.resize_instance(context, instance,
+ self.compute_task_api.resize_instance(
+ context, instance,
scheduler_hint=scheduler_hint,
- flavor=new_instance_type,
+ flavor=new_flavor,
clean_shutdown=clean_shutdown,
request_spec=request_spec,
do_cast=True)
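Two resize rules enforced above are easy to miss in the rename noise: an explicit flavor_id equal to the current flavor is rejected, and a root_gb=0 flavor is accepted only for volume-backed servers. A condensed sketch of just those checks, assuming the module's exception and _ imports; volume_backed stands in for the compute_utils.is_volume_backed_instance() lookup:

    def check_resize_flavor(current_flavor, new_flavor, flavor_id,
                            volume_backed):
        # Sketch of two checks from API.resize(); not the full method.
        same_flavor = current_flavor['id'] == new_flavor['id']
        if same_flavor and flavor_id:
            raise exception.CannotResizeToSameFlavor()
        if (new_flavor.get('root_gb') == 0 and
                current_flavor.get('root_gb') != 0 and not volume_backed):
            reason = _('Resize to zero disk flavor is not allowed.')
            raise exception.CannotResizeDisk(reason=reason)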
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index bcc6f30885..eb6f11d220 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -57,9 +57,11 @@ class Claim(NopClaim):
correct decisions with respect to host selection.
"""
- def __init__(self, context, instance, nodename, tracker, compute_node,
- pci_requests, migration=None, limits=None):
- super(Claim, self).__init__(migration=migration)
+ def __init__(
+ self, context, instance, nodename, tracker, compute_node, pci_requests,
+ migration=None, limits=None,
+ ):
+ super().__init__(migration=migration)
# Stash a copy of the instance at the current point of time
self.instance = instance.obj_clone()
self.nodename = nodename
@@ -159,21 +161,24 @@ class MoveClaim(Claim):
Move can be either a migrate/resize, live-migrate or an evacuate operation.
"""
- def __init__(self, context, instance, nodename, instance_type, image_meta,
- tracker, compute_node, pci_requests, migration, limits=None):
+ def __init__(
+ self, context, instance, nodename, flavor, image_meta, tracker,
+ compute_node, pci_requests, migration, limits=None,
+ ):
self.context = context
- self.instance_type = instance_type
+ self.flavor = flavor
if isinstance(image_meta, dict):
image_meta = objects.ImageMeta.from_dict(image_meta)
self.image_meta = image_meta
- super(MoveClaim, self).__init__(context, instance, nodename, tracker,
- compute_node, pci_requests,
- migration=migration, limits=limits)
+
+ super().__init__(
+ context, instance, nodename, tracker, compute_node, pci_requests,
+ migration=migration, limits=limits,
+ )
@property
def numa_topology(self):
- return hardware.numa_get_constraints(self.instance_type,
- self.image_meta)
+ return hardware.numa_get_constraints(self.flavor, self.image_meta)
def abort(self):
"""Compute operation requiring claimed resources has failed or
@@ -183,7 +188,7 @@ class MoveClaim(Claim):
self.tracker.drop_move_claim(
self.context,
self.instance, self.nodename,
- instance_type=self.instance_type)
+ flavor=self.flavor)
self.instance.drop_migration_context()
def _test_pci(self):
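Claims are consumed as context managers: leaving the with block on an exception calls abort(), which for MoveClaim now drops the move claim under the renamed flavor keyword. A hedged usage sketch, where rt is a ResourceTracker and do_resize is a stand-in for the real work:

    with rt.resize_claim(context, instance, flavor, nodename,
                         migration, allocations) as claim:
        # An exception here triggers claim.abort(), which calls
        # rt.drop_move_claim(..., flavor=claim.flavor).
        do_resize(claim.migration)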
diff --git a/nova/compute/flavors.py b/nova/compute/flavors.py
index 484d8ffd0c..b73cf433bb 100644
--- a/nova/compute/flavors.py
+++ b/nova/compute/flavors.py
@@ -173,26 +173,27 @@ def extract_flavor(instance, prefix=''):
# NOTE(danms): This method is deprecated, do not use it!
# Use instance.{old_,new_,}flavor instead, as instances no longer
# have flavor information in system_metadata.
-def save_flavor_info(metadata, instance_type, prefix=''):
- """Save properties from instance_type into instance's system_metadata,
+# NOTE(stephenfin): 'prefix' is unused and could be removed
+def save_flavor_info(metadata, flavor, prefix=''):
+ """Save properties from flavor into instance's system_metadata,
in the format of:
[prefix]instance_type_[key]
This can be used to update system_metadata in place from a type, as well
- as stash information about another instance_type for later use (such as
+ as stash information about another flavor for later use (such as
during resize).
"""
for key in system_metadata_flavor_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
- metadata[to_key] = instance_type[key]
+ metadata[to_key] = flavor[key]
# NOTE(danms): We do NOT save all of extra_specs here, but only the
# NUMA-related ones that we need to avoid an uglier alternative. This
# should be replaced by a general split-out of flavor information from
# system_metadata very soon.
- extra_specs = instance_type.get('extra_specs', {})
+ extra_specs = flavor.get('extra_specs', {})
for extra_prefix in system_metadata_flavor_extra_props:
for key in extra_specs:
if key.startswith(extra_prefix):
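Given the [prefix]instance_type_[key] format described in the docstring, a call with the default empty prefix leaves system_metadata entries like the following (values illustrative; per the NOTE above, the helper is deprecated):

    metadata = {}
    save_flavor_info(metadata, flavor)
    # metadata now resembles:
    # {'instance_type_memory_mb': 2048,
    #  'instance_type_vcpus': 2,
    #  'instance_type_root_gb': 20,
    #  ...}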
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 630645bb74..ab6039c445 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1610,7 +1610,11 @@ class ComputeManager(manager.Manager):
return [_decode(f) for f in injected_files]
def _validate_instance_group_policy(self, context, instance,
- scheduler_hints):
+ scheduler_hints=None):
+
+ if CONF.workarounds.disable_group_policy_check_upcall:
+ return
+
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# the policy. Since more than one instance may be scheduled at the
@@ -1619,29 +1623,63 @@ class ComputeManager(manager.Manager):
# multiple instances with an affinity policy could end up on different
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
- group_hint = scheduler_hints.get('group')
- if not group_hint:
- return
-
- # The RequestSpec stores scheduler_hints as key=list pairs so we need
- # to check the type on the value and pull the single entry out. The
- # API request schema validates that the 'group' hint is a single value.
- if isinstance(group_hint, list):
- group_hint = group_hint[0]
+ if scheduler_hints is not None:
+ # Only take this path when scheduler_hints was provided, even if
+ # it is empty.
+ group_hint = scheduler_hints.get('group')
+ if not group_hint:
+ return
+ else:
+ # The RequestSpec stores scheduler_hints as key=list pairs so
+ # we need to check the type on the value and pull the single
+ # entry out. The API request schema validates that
+ # the 'group' hint is a single value.
+ if isinstance(group_hint, list):
+ group_hint = group_hint[0]
+
+ group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ else:
+ # TODO(ganso): a call to DB can be saved by adding request_spec
+ # to rpcapi payload of live_migration, pre_live_migration and
+ # check_can_live_migrate_destination
+ try:
+ group = objects.InstanceGroup.get_by_instance_uuid(
+ context, instance.uuid)
+ except exception.InstanceGroupNotFound:
+ return
- @utils.synchronized(group_hint)
- def _do_validation(context, instance, group_hint):
- group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ @utils.synchronized(group['uuid'])
+ def _do_validation(context, instance, group):
if group.policy and 'anti-affinity' == group.policy:
+
+ # instances on host
instances_uuids = objects.InstanceList.get_uuids_by_host(
context, self.host)
ins_on_host = set(instances_uuids)
+
+ # instance param is just for logging, the nodename obtained is
+ # not actually related to the instance at all
+ nodename = self._get_nodename(instance)
+
+ # instances being migrated to host
+ migrations = (
+ objects.MigrationList.get_in_progress_by_host_and_node(
+ context, self.host, nodename))
+ migration_vm_uuids = set([mig['instance_uuid']
+ for mig in migrations])
+
+ total_instances = migration_vm_uuids | ins_on_host
+
+ # refresh group to get updated members within locked block
+ group = objects.InstanceGroup.get_by_uuid(context,
+ group['uuid'])
members = set(group.members)
# Determine the set of instance group members on this host
# which are not the instance in question. This is used to
# determine how many other members from the same anti-affinity
# group can be on this host.
- members_on_host = ins_on_host & members - set([instance.uuid])
+ members_on_host = (total_instances & members -
+ set([instance.uuid]))
rules = group.rules
if rules and 'max_server_per_host' in rules:
max_server = rules['max_server_per_host']
@@ -1653,6 +1691,12 @@ class ComputeManager(manager.Manager):
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
+
+ # NOTE(ganso): The affinity check below does not work reliably: it
+ # can easily be violated because the lock is taken on different
+ # compute hosts.
+ # The only fix seems to be a DB lock that performs the check
+ # whenever the host field is set on an instance.
elif group.policy and 'affinity' == group.policy:
group_hosts = group.get_hosts(exclude=[instance.uuid])
if group_hosts and self.host not in group_hosts:
@@ -1661,8 +1705,7 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid,
reason=msg)
- if not CONF.workarounds.disable_group_policy_check_upcall:
- _do_validation(context, instance, group_hint)
+ _do_validation(context, instance, group)
def _log_original_error(self, exc_info, instance_uuid):
LOG.error('Error: %s', exc_info[1], instance_uuid=instance_uuid,
@@ -5040,9 +5083,10 @@ class ComputeManager(manager.Manager):
instance.uuid)
return orig_alloc
- def _prep_resize(self, context, image, instance, instance_type,
- filter_properties, node, migration, request_spec,
- clean_shutdown=True):
+ def _prep_resize(
+ self, context, image, instance, flavor, filter_properties, node,
+ migration, request_spec, clean_shutdown=True,
+ ):
if not filter_properties:
filter_properties = {}
@@ -5054,7 +5098,7 @@ class ComputeManager(manager.Manager):
same_host = instance.host == self.host
# if the flavor IDs match, it's migrate; otherwise resize
- if same_host and instance_type.id == instance['instance_type_id']:
+ if same_host and flavor.id == instance['instance_type_id']:
# check driver whether support migrate to same host
if not self.driver.capabilities.get(
'supports_migrate_to_same_host', False):
@@ -5065,9 +5109,9 @@ class ComputeManager(manager.Manager):
inner_exception=exception.UnableToMigrateToSelf(
instance_id=instance.uuid, host=self.host))
- # NOTE(danms): Stash the new instance_type to avoid having to
+ # NOTE(danms): Stash the new flavor to avoid having to
# look it up in the database later
- instance.new_flavor = instance_type
+ instance.new_flavor = flavor
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
@@ -5106,14 +5150,15 @@ class ComputeManager(manager.Manager):
limits = filter_properties.get('limits', {})
allocs = self.reportclient.get_allocations_for_consumer(
context, instance.uuid)
- with self.rt.resize_claim(context, instance, instance_type, node,
- migration, allocs, image_meta=image,
- limits=limits) as claim:
+ with self.rt.resize_claim(
+ context, instance, flavor, node, migration, allocs,
+ image_meta=image, limits=limits,
+ ) as claim:
LOG.info('Migrating', instance=instance)
# RPC cast to the source host to start the actual resize/migration.
self.compute_rpcapi.resize_instance(
- context, instance, claim.migration, image,
- instance_type, request_spec, clean_shutdown)
+ context, instance, claim.migration, image,
+ flavor, request_spec, clean_shutdown)
def _send_prep_resize_notifications(
self, context, instance, phase, flavor):
@@ -5174,10 +5219,24 @@ class ComputeManager(manager.Manager):
with self._error_out_instance_on_exception(
context, instance, instance_state=instance_state),\
errors_out_migration_ctxt(migration):
+
self._send_prep_resize_notifications(
context, instance, fields.NotificationPhase.START,
flavor)
try:
+ scheduler_hints = self._get_scheduler_hints(filter_properties,
+ request_spec)
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. At this point the migration is already
+ # in-progress, so this is the definitive moment to abort due to
+ # the policy violation. Also, exploding here is covered by the
+ # cleanup methods in the except block.
+ try:
+ self._validate_instance_group_policy(context, instance,
+ scheduler_hints)
+ except exception.RescheduledException as e:
+ raise exception.InstanceFaultRollback(inner_exception=e)
+
self._prep_resize(context, image, instance,
flavor, filter_properties,
node, migration, request_spec,
@@ -5205,7 +5264,7 @@ class ComputeManager(manager.Manager):
flavor)
def _reschedule_resize_or_reraise(self, context, instance, exc_info,
- instance_type, request_spec, filter_properties, host_list):
+ flavor, request_spec, filter_properties, host_list):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
@@ -5234,7 +5293,7 @@ class ComputeManager(manager.Manager):
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(
- context, instance, scheduler_hint, instance_type,
+ context, instance, scheduler_hint, flavor,
request_spec=request_spec, host_list=host_list)
rescheduled = True
@@ -5504,9 +5563,10 @@ class ComputeManager(manager.Manager):
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
- def _resize_instance(self, context, instance, image,
- migration, instance_type, clean_shutdown,
- request_spec):
+ def _resize_instance(
+ self, context, instance, image, migration, flavor,
+ clean_shutdown, request_spec,
+ ):
# Pass instance_state=instance.vm_state because we can resize
# a STOPPED server and we don't want to set it back to ACTIVE
# in case migrate_disk_and_power_off raises InstanceFaultRollback.
@@ -5535,10 +5595,10 @@ class ComputeManager(manager.Manager):
timeout, retry_interval = self._get_power_off_values(
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
- context, instance, migration.dest_host,
- instance_type, network_info,
- block_device_info,
- timeout, retry_interval)
+ context, instance, migration.dest_host,
+ flavor, network_info,
+ block_device_info,
+ timeout, retry_interval)
self._terminate_volume_connections(context, instance, bdms)
@@ -5615,13 +5675,13 @@ class ComputeManager(manager.Manager):
connector)
@staticmethod
- def _set_instance_info(instance, instance_type):
- instance.instance_type_id = instance_type.id
- instance.memory_mb = instance_type.memory_mb
- instance.vcpus = instance_type.vcpus
- instance.root_gb = instance_type.root_gb
- instance.ephemeral_gb = instance_type.ephemeral_gb
- instance.flavor = instance_type
+ def _set_instance_info(instance, flavor):
+ instance.instance_type_id = flavor.id
+ instance.memory_mb = flavor.memory_mb
+ instance.vcpus = flavor.vcpus
+ instance.root_gb = flavor.root_gb
+ instance.ephemeral_gb = flavor.ephemeral_gb
+ instance.flavor = flavor
def _update_volume_attachments(self, context, instance, bdms):
"""Updates volume attachments using the virt driver host connector.
@@ -7909,6 +7969,20 @@ class ComputeManager(manager.Manager):
:param limits: objects.SchedulerLimits object for this live migration.
:returns: a LiveMigrateData object (hypervisor-dependent)
"""
+
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. This check at this moment is not very accurate, as
+ # multiple requests may be happening concurrently and miss the lock,
+ # but when it works it provides a better user experience by failing
+ # earlier. Also, it should be safe to explode here: the error
+ # becomes NoValidHost and the instance status remains ACTIVE.
+ try:
+ self._validate_instance_group_policy(ctxt, instance)
+ except exception.RescheduledException as e:
+ msg = ("Failed to validate instance group policy "
+ "due to: {}".format(e))
+ raise exception.MigrationPreCheckError(reason=msg)
+
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
@@ -8048,6 +8122,13 @@ class ComputeManager(manager.Manager):
"""
LOG.debug('pre_live_migration data is %s', migrate_data)
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. At this point the migration is already in-progress,
+ # so this is the definitive moment to abort due to the policy
+ # violation. Also, it should be safe to explode here. The instance
+ # status remains ACTIVE, migration status failed.
+ self._validate_instance_group_policy(context, instance)
+
migrate_data.old_vol_attachment_ids = {}
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -9082,7 +9163,7 @@ class ComputeManager(manager.Manager):
LOG.debug('Dropping live migration resource claim on destination '
'node %s', nodename, instance=instance)
self.rt.drop_move_claim(
- context, instance, nodename, instance_type=instance.flavor)
+ context, instance, nodename, flavor=instance.flavor)
@wrap_exception()
@wrap_instance_event(prefix='compute')
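The reworked anti-affinity check counts both instances already on the host and those migrating to it: the blocking set is (instances on host ∪ in-progress migration instances) ∩ group members − {this instance}, then compared against max_server_per_host. A worked sketch with hypothetical UUIDs:

    ins_on_host = {'uuid-a', 'uuid-b'}          # already on this host
    migration_vm_uuids = {'uuid-c'}             # migrating to this host
    members = {'uuid-a', 'uuid-c', 'uuid-d'}    # anti-affinity group
    instance_uuid = 'uuid-d'                    # instance being validated

    total_instances = migration_vm_uuids | ins_on_host
    members_on_host = total_instances & members - {instance_uuid}
    # members_on_host == {'uuid-a', 'uuid-c'}; with the default
    # max_server_per_host of 1, len(members_on_host) >= 1 and the
    # host must reject the instance.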
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 565822d20c..c3b74546c7 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -201,27 +201,29 @@ class ResourceTracker(object):
def rebuild_claim(self, context, instance, nodename, allocations,
limits=None, image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
- instance_type = instance.flavor
return self._move_claim(
- context, instance, instance_type, nodename, migration, allocations,
- move_type=fields.MigrationType.EVACUATION,
+ context, instance, instance.flavor, nodename, migration,
+ allocations, move_type=fields.MigrationType.EVACUATION,
image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
- def resize_claim(self, context, instance, instance_type, nodename,
- migration, allocations, image_meta=None, limits=None):
+ def resize_claim(
+ self, context, instance, flavor, nodename, migration, allocations,
+ image_meta=None, limits=None,
+ ):
"""Create a claim for a resize or cold-migration move.
Note that this code assumes ``instance.new_flavor`` is set when
resizing with a new flavor.
"""
- return self._move_claim(context, instance, instance_type, nodename,
- migration, allocations, image_meta=image_meta,
- limits=limits)
+ return self._move_claim(
+ context, instance, flavor, nodename, migration,
+ allocations, image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
- def live_migration_claim(self, context, instance, nodename, migration,
- limits, allocs):
+ def live_migration_claim(
+ self, context, instance, nodename, migration, limits, allocs,
+ ):
"""Builds a MoveClaim for a live migration.
:param context: The request context.
@@ -235,17 +237,18 @@ class ResourceTracker(object):
:returns: A MoveClaim for this live migration.
"""
# Flavor and image cannot change during a live migration.
- instance_type = instance.flavor
+ flavor = instance.flavor
image_meta = instance.image_meta
return self._move_claim(
- context, instance, instance_type, nodename, migration, allocs,
+ context, instance, flavor, nodename, migration, allocs,
move_type=fields.MigrationType.LIVE_MIGRATION,
image_meta=image_meta, limits=limits,
)
- def _move_claim(self, context, instance, new_instance_type, nodename,
- migration, allocations, move_type=None,
- image_meta=None, limits=None):
+ def _move_claim(
+ self, context, instance, new_flavor, nodename, migration, allocations,
+ move_type=None, image_meta=None, limits=None,
+ ):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
@@ -253,7 +256,7 @@ class ResourceTracker(object):
:param context: security context
:param instance: instance object to reserve resources for
- :param new_instance_type: new instance_type being resized to
+ :param new_flavor: new flavor being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
@@ -271,9 +274,8 @@ class ResourceTracker(object):
if migration:
self._claim_existing_migration(migration, nodename)
else:
- migration = self._create_migration(context, instance,
- new_instance_type,
- nodename, move_type)
+ migration = self._create_migration(
+ context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
@@ -287,7 +289,7 @@ class ResourceTracker(object):
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
- new_instance_type)
+ new_flavor)
new_pci_requests.instance_uuid = instance.uuid
# On resize merge the SR-IOV ports pci_requests
# with the new instance flavor pci_requests.
@@ -296,7 +298,7 @@ class ResourceTracker(object):
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
- new_instance_type, image_meta, self, cn,
+ new_flavor, image_meta, self, cn,
new_pci_requests, migration, limits=limits)
claimed_pci_devices_objs = []
@@ -345,8 +347,9 @@ class ResourceTracker(object):
return claim
- def _create_migration(self, context, instance, new_instance_type,
- nodename, move_type=None):
+ def _create_migration(
+ self, context, instance, new_flavor, nodename, move_type=None,
+ ):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
@@ -356,7 +359,7 @@ class ResourceTracker(object):
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
- migration.new_instance_type_id = new_instance_type.id
+ migration.new_instance_type_id = new_flavor.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
@@ -587,38 +590,35 @@ class ResourceTracker(object):
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim(self, context, instance, nodename,
- instance_type=None, prefix='new_'):
+ flavor=None, prefix='new_'):
self._drop_move_claim(
- context, instance, nodename, instance_type, prefix='new_')
+ context, instance, nodename, flavor, prefix='new_')
def _drop_move_claim(
- self, context, instance, nodename, instance_type=None, prefix='new_',
+ self, context, instance, nodename, flavor=None, prefix='new_',
):
"""Remove usage for an incoming/outgoing migration.
:param context: Security context.
:param instance: The instance whose usage is to be removed.
:param nodename: Host on which to remove usage. If the migration
- completed successfully, this is normally the source.
- If it did not complete successfully (failed or
- reverted), this is normally the destination.
- :param instance_type: The flavor that determines the usage to remove.
- If the migration completed successfully, this is
- the old flavor to be removed from the source. If
- the migration did not complete successfully, this
- is the new flavor to be removed from the
- destination.
+ completed successfully, this is normally the source. If it did not
+ complete successfully (failed or reverted), this is normally the
+ destination.
+ :param flavor: The flavor that determines the usage to remove. If the
+ migration completed successfully, this is the old flavor to be
+ removed from the source. If the migration did not complete
+ successfully, this is the new flavor to be removed from the
+ destination.
:param prefix: Prefix to use when accessing migration context
- attributes. 'old_' or 'new_', with 'new_' being the
- default.
+ attributes. 'old_' or 'new_', with 'new_' being the default.
"""
# Remove usage for an instance that is tracked in migrations, such as
# on the dest node during revert resize.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
- if not instance_type:
- instance_type = self._get_instance_type(instance, prefix,
- migration)
+ if not flavor:
+ flavor = self._get_flavor(instance, prefix, migration)
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
@@ -626,11 +626,11 @@ class ResourceTracker(object):
elif instance['uuid'] in self.tracked_instances:
self.tracked_instances.remove(instance['uuid'])
- if instance_type is not None:
+ if flavor is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
- instance_type, instance, numa_topology=numa_topology)
+ flavor, instance, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
resources = self._get_migration_context_resource(
'resources', instance, prefix=prefix)
@@ -1298,9 +1298,8 @@ class ResourceTracker(object):
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
- if (instance['instance_type_id'] ==
- migration.old_instance_type_id):
- itype = self._get_instance_type(instance, 'new_', migration)
+ if instance['instance_type_id'] == migration.old_instance_type_id:
+ itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
@@ -1316,13 +1315,13 @@ class ResourceTracker(object):
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
- itype = self._get_instance_type(instance, 'old_', migration)
+ itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not tracked:
# instance has not yet migrated here:
- itype = self._get_instance_type(instance, 'new_', migration)
+ itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
@@ -1332,7 +1331,7 @@ class ResourceTracker(object):
elif outbound and not tracked:
# instance migrated, but record usage for a possible revert:
- itype = self._get_instance_type(instance, 'old_', migration)
+ itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
# We could be racing with confirm_resize setting the
@@ -1657,15 +1656,15 @@ class ResourceTracker(object):
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
- def _get_instance_type(self, instance, prefix, migration):
- """Get the instance type from instance."""
+ def _get_flavor(self, instance, prefix, migration):
+ """Get the flavor from instance."""
if migration.is_resize:
return getattr(instance, '%sflavor' % prefix)
- else:
- # NOTE(ndipanov): Certain migration types (all but resize)
- # do not change flavors so there is no need to stash
- # them. In that case - just get the instance flavor.
- return instance.flavor
+
+ # NOTE(ndipanov): Certain migration types (all but resize)
+ # do not change flavors so there is no need to stash
+ # them. In that case - just get the instance flavor.
+ return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
"""Make a usage dict _update methods expect.
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index e83cb5c812..c5be116584 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1083,9 +1083,10 @@ def get_headroom(quotas, usages, deltas):
return headroom
-def check_num_instances_quota(context, instance_type, min_count,
- max_count, project_id=None, user_id=None,
- orig_num_req=None):
+def check_num_instances_quota(
+ context, flavor, min_count, max_count, project_id=None, user_id=None,
+ orig_num_req=None,
+):
"""Enforce quota limits on number of instances created."""
# project_id is also used for the TooManyInstances error message
if project_id is None:
@@ -1100,8 +1101,8 @@ def check_num_instances_quota(context, instance_type, min_count,
if not any(r in user_quotas for r in ['instances', 'cores', 'ram']):
user_id = None
# Determine requested cores and ram
- req_cores = max_count * instance_type.vcpus
- req_ram = max_count * instance_type.memory_mb
+ req_cores = max_count * flavor.vcpus
+ req_ram = max_count * flavor.memory_mb
deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
try:
@@ -1117,8 +1118,8 @@ def check_num_instances_quota(context, instance_type, min_count,
if min_count == max_count == 0:
# orig_num_req is the original number of instances requested in the
# case of a recheck quota, for use in the over quota exception.
- req_cores = orig_num_req * instance_type.vcpus
- req_ram = orig_num_req * instance_type.memory_mb
+ req_cores = orig_num_req * flavor.vcpus
+ req_ram = orig_num_req * flavor.memory_mb
requested = {'instances': orig_num_req, 'cores': req_cores,
'ram': req_ram}
(overs, reqs, total_alloweds, useds) = get_over_quota_detail(
@@ -1136,21 +1137,19 @@ def check_num_instances_quota(context, instance_type, min_count,
allowed = headroom.get('instances', 1)
# Reduce 'allowed' instances in line with the cores & ram headroom
- if instance_type.vcpus:
- allowed = min(allowed,
- headroom['cores'] // instance_type.vcpus)
- if instance_type.memory_mb:
- allowed = min(allowed,
- headroom['ram'] // instance_type.memory_mb)
+ if flavor.vcpus:
+ allowed = min(allowed, headroom['cores'] // flavor.vcpus)
+ if flavor.memory_mb:
+ allowed = min(allowed, headroom['ram'] // flavor.memory_mb)
# Convert to the appropriate exception message
if allowed <= 0:
msg = "Cannot run any more instances of this type."
elif min_count <= allowed <= max_count:
# We're actually OK, but still need to check against allowed
- return check_num_instances_quota(context, instance_type, min_count,
- allowed, project_id=project_id,
- user_id=user_id)
+ return check_num_instances_quota(
+ context, flavor, min_count, allowed, project_id=project_id,
+ user_id=user_id)
else:
msg = "Can only run %s more instances of this type." % allowed
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 63e7624e11..65b67238e3 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -1351,18 +1351,18 @@ class ComputeTaskManager(base.Base):
# TODO(avolkov): move method to bdm
@staticmethod
- def _volume_size(instance_type, bdm):
+ def _volume_size(flavor, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
- size = instance_type.get('swap', 0)
+ size = flavor.get('swap', 0)
else:
- size = instance_type.get('ephemeral_gb', 0)
+ size = flavor.get('ephemeral_gb', 0)
return size
- def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
+ def _create_block_device_mapping(self, cell, flavor, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
@@ -1373,7 +1373,7 @@ class ComputeTaskManager(base.Base):
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
- bdm.volume_size = self._volume_size(instance_type, bdm)
+ bdm.volume_size = self._volume_size(flavor, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
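
The inheritance rule in _volume_size only ever fires for blank, local disks
with no explicit size. A sketch of the resulting behaviour, using plain dicts
as stand-ins for BDM objects and a flavor with swap=1024 and ephemeral_gb=10:

    flavor = {'swap': 1024, 'ephemeral_gb': 10}
    swap_bdm = {'source_type': 'blank', 'destination_type': 'local',
                'guest_format': 'swap', 'volume_size': None}
    eph_bdm = {'source_type': 'blank', 'destination_type': 'local',
               'volume_size': None}
    vol_bdm = {'source_type': 'volume', 'destination_type': 'volume',
               'volume_size': 20}
    # _volume_size(flavor, swap_bdm) -> 1024, from flavor['swap']
    # _volume_size(flavor, eph_bdm)  -> 10, from flavor['ephemeral_gb']
    # _volume_size(flavor, vol_bdm)  -> 20, the explicit size wins
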
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0679dcbdc0..63de7fd975 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -38,7 +38,6 @@ import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
-from sqlalchemy.ext.compiler import compiles
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
@@ -52,7 +51,6 @@ from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
@@ -367,23 +365,6 @@ class InequalityCondition(object):
return [field != value for value in self.values]
-class DeleteFromSelect(UpdateBase):
- def __init__(self, table, select, column):
- self.table = table
- self.select = select
- self.column = column
-
-
-# NOTE(guochbo): some versions of MySQL doesn't yet support subquery with
-# 'LIMIT & IN/ALL/ANY/SOME' We need work around this with nesting select .
-@compiles(DeleteFromSelect)
-def visit_delete_from_select(element, compiler, **kw):
- return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
- compiler.process(element.table, asfrom=True),
- compiler.process(element.column),
- element.column.name,
- compiler.process(element.select))
-
###################
@@ -4202,8 +4183,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
column = table.c.domain
else:
column = table.c.id
- # NOTE(guochbo): Use DeleteFromSelect to avoid
- # database's limit of maximum parameter in one SQL statement.
+
deleted_column = table.c.deleted
columns = [c.name for c in table.c]
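
The DeleteFromSelect construct removed above is no longer used by the archive
code. Should the pattern be needed again, oslo.db ships an equivalent
construct with the same (table, select, column) signature; a hedged sketch,
assuming a SQLAlchemy table with id and deleted columns and an open
connection conn:

    import sqlalchemy as sa
    from oslo_db.sqlalchemy import utils as oslodbutils

    # select at most 1000 soft-deleted rows, then delete exactly those rows
    query = sa.select([table.c.id]).where(table.c.deleted != 0).limit(1000)
    conn.execute(oslodbutils.DeleteFromSelect(table, query, table.c.id))
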
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/402_train.py b/nova/db/sqlalchemy/migrate_repo/versions/402_train.py
index b0fc12af6a..620f5fe393 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/402_train.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/402_train.py
@@ -560,6 +560,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
+ # TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
@@ -578,6 +579,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
+ # TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
@@ -594,6 +596,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
+ # TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 9c3186b525..ed456f54d9 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -20,6 +20,7 @@ API and utilities for nova-network interactions.
import copy
import functools
+import inspect
import time
import typing as ty
@@ -133,7 +134,7 @@ def refresh_cache(f):
Requires context and instance as function args
"""
- argspec = utils.getargspec(f)
+ argspec = inspect.getfullargspec(f)
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
@@ -271,6 +272,7 @@ def _get_ksa_client(context, admin=False):
client = utils.get_ksa_adapter(
'network', ksa_auth=auth_plugin, ksa_session=session)
client.additional_headers = {'accept': 'application/json'}
+ client.connect_retries = CONF.neutron.http_retries
return client
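
The refresh_cache decorator only needs the argument-name list, so the stdlib
inspect.getfullargspec is a drop-in replacement for the private
utils.getargspec helper. A minimal sketch of the name-to-position lookup such
decorators rely on, with a made-up method signature:

    import inspect

    def fake_method(self, context, instance, use_slave=False):
        pass

    spec = inspect.getfullargspec(fake_method)
    assert spec.args == ['self', 'context', 'instance', 'use_slave']
    # wrapper(self, context, *args, **kwargs) subtracts the two explicit
    # parameters to find 'instance' inside *args:
    assert spec.args.index('instance') - 2 == 0
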
diff --git a/nova/notifications/base.py b/nova/notifications/base.py
index 603113cb7f..a3f4c7cb45 100644
--- a/nova/notifications/base.py
+++ b/nova/notifications/base.py
@@ -321,9 +321,9 @@ def info_from_instance(context, instance, network_info,
image_ref_url = instance.image_ref
exc_ctx.reraise = False
- instance_type = instance.get_flavor()
- instance_type_name = instance_type.get('name', '')
- instance_flavorid = instance_type.get('flavorid', '')
+ flavor = instance.get_flavor()
+ flavor_name = flavor.get('name', '')
+ instance_flavorid = flavor.get('flavorid', '')
instance_info = dict(
# Owner properties
@@ -337,7 +337,7 @@ def info_from_instance(context, instance, network_info,
hostname=instance.hostname,
# Type properties
- instance_type=instance_type_name,
+ instance_type=flavor_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index c07b358647..08ee0151fe 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -124,6 +124,14 @@ class ImageMeta(base.NovaObject):
"""
sysmeta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(sysmeta)
+
+ # NOTE(lyarwood): Provide the id of the image in image_meta if it
+ # wasn't persisted in the system_metadata of the instance previously.
+ # This is only provided to allow users of image_meta to avoid the need
+ # to pass around references to instance.image_ref alongside image_meta.
+ if image_meta.get('id') is None and instance.image_ref:
+ image_meta['id'] = instance.image_ref
+
return cls.from_dict(image_meta)
@classmethod
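
The effect of the backfill above, sketched for an image-backed instance whose
system_metadata predates this change (hypothetical objects, not a real test):

    image_meta = objects.ImageMeta.from_instance(instance)
    # system_metadata carried no image id, so it is taken from the instance
    assert image_meta.id == instance.image_ref
    # volume-backed instances have an empty image_ref, so their image_meta
    # continues to omit 'id'
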
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index 2cd84598d0..d50efc5159 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -148,6 +148,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
+ # TODO(stephenfin): Remove this in version 3.0 of the object as it has
+ # been replaced by 'flavor'
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index 9a195086cc..34e30922a4 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -53,6 +53,8 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
'source_node': fields.StringField(nullable=True), # source nodename
'dest_node': fields.StringField(nullable=True), # dest nodename
'dest_host': fields.StringField(nullable=True), # dest host IP
+ # TODO(stephenfin): Rename these to old_flavor_id, new_flavor_id in
+ # v2.0
'old_instance_type_id': fields.IntegerField(nullable=True),
'new_instance_type_id': fields.IntegerField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 11334335e0..5478a1e9c9 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -674,8 +674,7 @@ class RequestSpec(base.NovaObject):
spec.instance_group.hosts = None
# NOTE(mriedem): Don't persist these since they are per-request
for excluded in ('retry', 'requested_destination',
- 'requested_resources', 'ignore_hosts',
- 'request_level_params'):
+ 'requested_resources', 'ignore_hosts'):
if excluded in spec and getattr(spec, excluded):
setattr(spec, excluded, None)
# NOTE(stephenfin): Don't persist network metadata since we have
@@ -686,6 +685,10 @@ class RequestSpec(base.NovaObject):
# no need for it after scheduling
if 'requested_networks' in spec and spec.requested_networks:
del spec.requested_networks
+        # NOTE(gibi): Don't persist request_level_params since we have
+        # no need for it after scheduling
+ if 'request_level_params' in spec and spec.request_level_params:
+ del spec.request_level_params
db_updates = {'spec': jsonutils.dumps(spec.obj_to_primitive())}
if 'instance_uuid' in updates:
diff --git a/nova/scheduler/filters/aggregate_instance_extra_specs.py b/nova/scheduler/filters/aggregate_instance_extra_specs.py
index 58471ba375..68017b1d38 100644
--- a/nova/scheduler/filters/aggregate_instance_extra_specs.py
+++ b/nova/scheduler/filters/aggregate_instance_extra_specs.py
@@ -28,7 +28,7 @@ _SCOPE = 'aggregate_instance_extra_specs'
class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
- """AggregateInstanceExtraSpecsFilter works with InstanceType records."""
+ """AggregateInstanceExtraSpecsFilter works with flavor records."""
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
@@ -36,21 +36,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
- """Return a list of hosts that can create instance_type
+ """Return a list of hosts that can create flavor.
Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False.
"""
- instance_type = spec_obj.flavor
+ flavor = spec_obj.flavor
# If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further
- if (not instance_type.obj_attr_is_set('extra_specs') or
- not instance_type.extra_specs):
+ if 'extra_specs' not in flavor or not flavor.extra_specs:
return True
metadata = utils.aggregate_metadata_get_by_host(host_state)
- for key, req in instance_type.extra_specs.items():
+ for key, req in flavor.extra_specs.items():
# Either not scope format, or aggregate_instance_extra_specs scope
scope = key.split(':', 1)
if len(scope) > 1:
@@ -62,18 +61,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
aggregate_vals = metadata.get(key, None)
if not aggregate_vals:
LOG.debug(
- "%(host_state)s fails instance_type extra_specs "
- "requirements. Extra_spec %(key)s is not in aggregate.",
+ "%(host_state)s fails flavor extra_specs requirements. "
+ "Extra_spec %(key)s is not in aggregate.",
{'host_state': host_state, 'key': key})
return False
for aggregate_val in aggregate_vals:
if extra_specs_ops.match(aggregate_val, req):
break
else:
- LOG.debug("%(host_state)s fails instance_type extra_specs "
- "requirements. '%(aggregate_vals)s' do not "
- "match '%(req)s'",
- {'host_state': host_state, 'req': req,
- 'aggregate_vals': aggregate_vals})
+ LOG.debug(
+ "%(host_state)s fails flavor extra_specs requirements. "
+ "'%(aggregate_vals)s' do not match '%(req)s'",
+ {
+ 'host_state': host_state, 'req': req,
+ 'aggregate_vals': aggregate_vals,
+ })
return False
return True
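
The membership test introduced above is the idiomatic spelling: for
o.vo-based objects such as Flavor, 'extra_specs' in flavor is effectively
flavor.obj_attr_is_set('extra_specs'), so the two-clause check it replaces
collapses to one expression:

    # equivalent forms on a Flavor object (sketch):
    #   not flavor.obj_attr_is_set('extra_specs') or not flavor.extra_specs
    #   'extra_specs' not in flavor or not flavor.extra_specs
    if 'extra_specs' not in flavor or not flavor.extra_specs:
        return True  # nothing to match against, the host passes
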
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index 8d2e06cbd9..9818867618 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -65,14 +65,14 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return None
return cap
- def _satisfies_extra_specs(self, host_state, instance_type):
+ def _satisfies_extra_specs(self, host_state, flavor):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
- if 'extra_specs' not in instance_type:
+ if 'extra_specs' not in flavor:
return True
- for key, req in instance_type.extra_specs.items():
+ for key, req in flavor.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
# If key does not have a namespace, the scope's size is 1, check
@@ -106,10 +106,10 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return True
def host_passes(self, host_state, spec_obj):
- """Return a list of hosts that can create instance_type."""
- instance_type = spec_obj.flavor
- if not self._satisfies_extra_specs(host_state, instance_type):
- LOG.debug("%(host_state)s fails instance_type extra_specs "
- "requirements", {'host_state': host_state})
+ """Return a list of hosts that can create flavor."""
+ if not self._satisfies_extra_specs(host_state, spec_obj.flavor):
+ LOG.debug(
+ "%(host_state)s fails flavor extra_specs requirements",
+ {'host_state': host_state})
return False
return True
diff --git a/nova/scheduler/filters/type_filter.py b/nova/scheduler/filters/type_filter.py
index 5b386cf83f..f60392a138 100644
--- a/nova/scheduler/filters/type_filter.py
+++ b/nova/scheduler/filters/type_filter.py
@@ -19,9 +19,9 @@ from nova.scheduler.filters import utils
class AggregateTypeAffinityFilter(filters.BaseHostFilter):
- """AggregateTypeAffinityFilter limits instance_type by aggregate
+ """AggregateTypeAffinityFilter limits flavors by aggregate
- return True if no instance_type key is set or if the aggregate metadata
+ return True if no flavor key is set or if the aggregate metadata
key 'instance_type' has the instance_type name as a value
"""
@@ -31,13 +31,11 @@ class AggregateTypeAffinityFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
- instance_type = spec_obj.flavor
-
+ # TODO(stephenfin): Add support for 'flavor' key
aggregate_vals = utils.aggregate_values_from_key(
host_state, 'instance_type')
for val in aggregate_vals:
- if (instance_type.name in
- [x.strip() for x in val.split(',')]):
+ if spec_obj.flavor.name in [x.strip() for x in val.split(',')]:
return True
return not aggregate_vals
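
The aggregate 'instance_type' metadata value is a comma-separated list of
flavor names, hence the split-and-strip above. A sketch with assumed metadata:

    aggregate_vals = {'m1.small, m1.medium'}
    flavor_name = 'm1.medium'
    assert any(
        flavor_name in [x.strip() for x in val.split(',')]
        for val in aggregate_vals)
    # with no 'instance_type' metadata at all, aggregate_vals is empty and
    # the filter passes the host ('return not aggregate_vals')
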
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index b71c209d13..d3d13ce0e4 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -534,7 +534,7 @@ class ResourceRequest(object):
list(str(rg) for rg in list(self._rg_by_id.values()))))
-def build_request_spec(image, instances, instance_type=None):
+def build_request_spec(image, instances, flavor=None):
"""Build a request_spec (ahem, not a RequestSpec) for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
@@ -543,21 +543,21 @@ def build_request_spec(image, instances, instance_type=None):
:param image: optional primitive image meta dict
:param instances: list of instances; objects will be converted to
primitives
- :param instance_type: optional flavor; objects will be converted to
+ :param flavor: optional flavor; objects will be converted to
primitives
:return: dict with the following keys::
'image': the image dict passed in or {}
'instance_properties': primitive version of the first instance passed
- 'instance_type': primitive version of the instance_type or None
+ 'instance_type': primitive version of the flavor or None
'num_instances': the number of instances passed in
"""
instance = instances[0]
- if instance_type is None:
+ if flavor is None:
if isinstance(instance, obj_instance.Instance):
- instance_type = instance.get_flavor()
+ flavor = instance.get_flavor()
else:
- instance_type = flavors.extract_flavor(instance)
+ flavor = flavors.extract_flavor(instance)
if isinstance(instance, obj_instance.Instance):
instance = obj_base.obj_to_primitive(instance)
@@ -565,25 +565,26 @@ def build_request_spec(image, instances, instance_type=None):
# to detach our metadata blob because we modify it below.
instance['system_metadata'] = dict(instance.get('system_metadata', {}))
- if isinstance(instance_type, objects.Flavor):
- instance_type = obj_base.obj_to_primitive(instance_type)
+ if isinstance(flavor, objects.Flavor):
+ flavor = obj_base.obj_to_primitive(flavor)
# NOTE(danms): Replicate this old behavior because the
# scheduler RPC interface technically expects it to be
# there. Remove this when we bump the scheduler RPC API to
# v5.0
try:
- flavors.save_flavor_info(instance.get('system_metadata', {}),
- instance_type)
+ flavors.save_flavor_info(
+ instance.get('system_metadata', {}), flavor)
except KeyError:
# If the flavor isn't complete (which is legit with a
# flavor object, just don't put it in the request spec
pass
request_spec = {
- 'image': image or {},
- 'instance_properties': instance,
- 'instance_type': instance_type,
- 'num_instances': len(instances)}
+ 'image': image or {},
+ 'instance_properties': instance,
+ 'instance_type': flavor,
+ 'num_instances': len(instances),
+ }
# NOTE(mriedem): obj_to_primitive above does not serialize everything
# in an object, like datetime fields, so we need to still call to_primitive
# to recursively serialize the items in the request_spec dict.
@@ -898,11 +899,12 @@ def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
context, method, instance_uuid, request_spec, vm_state, ex)
-def build_filter_properties(scheduler_hints, forced_host,
- forced_node, instance_type):
+def build_filter_properties(
+ scheduler_hints, forced_host, forced_node, flavor,
+):
"""Build the filter_properties dict from data in the boot request."""
filter_properties = dict(scheduler_hints=scheduler_hints)
- filter_properties['instance_type'] = instance_type
+ filter_properties['instance_type'] = flavor
# TODO(alaski): It doesn't seem necessary that these are conditionally
# added. Let's just add empty lists if not forced_host/node.
if forced_host:
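
Note that only the Python parameter is renamed here: the dict key stays
'instance_type' because legacy filter_properties consumers still expect it. A
sketch of the resulting structure, with placeholder inputs and no forced
host/node so only the unconditional keys appear:

    props = build_filter_properties(
        scheduler_hints={'group': uuids.group}, forced_host=None,
        forced_node=None, flavor=flavor)
    # -> {'scheduler_hints': {'group': ...}, 'instance_type': <Flavor>}
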
diff --git a/nova/test.py b/nova/test.py
index 8b3facbab7..4fd6c479de 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -633,8 +633,8 @@ class TestCase(base.BaseTestCase):
for name in sorted(implmethods.keys()):
# NOTE(stephenfin): We ignore type annotations
- baseargs = utils.getargspec(basemethods[name])[:-1]
- implargs = utils.getargspec(implmethods[name])[:-1]
+ baseargs = inspect.getfullargspec(basemethods[name])[:-1]
+ implargs = inspect.getfullargspec(implmethods[name])[:-1]
self.assertEqual(baseargs, implargs,
"%s args don't match base class %s" %
@@ -707,7 +707,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
# instead.
method = getattr(method, '__wrapped__')
- argspecs[name] = utils.getargspec(method)
+ argspecs[name] = inspect.getfullargspec(method)
return argspecs
diff --git a/nova/tests/functional/regressions/test_bug_1806064.py b/nova/tests/functional/regressions/test_bug_1806064.py
index 93696f3c4f..9583711526 100644
--- a/nova/tests/functional/regressions/test_bug_1806064.py
+++ b/nova/tests/functional/regressions/test_bug_1806064.py
@@ -87,7 +87,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
# but fails in conductor once the instance has been created in cell1.
original_quota_check = compute_utils.check_num_instances_quota
- def stub_check_num_instances_quota(_self, context, instance_type,
+ def stub_check_num_instances_quota(_self, context, flavor,
min_count, *args, **kwargs):
# Determine where we are in the flow based on whether or not the
# min_count is 0 (API will pass 1, conductor will pass 0).
@@ -96,7 +96,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
'test_bfv_quota_race_local_delete')
# We're checking from the API so perform the original quota check.
return original_quota_check(
- _self, context, instance_type, min_count, *args, **kwargs)
+ _self, context, flavor, min_count, *args, **kwargs)
self.stub_out('nova.compute.utils.check_num_instances_quota',
stub_check_num_instances_quota)
diff --git a/nova/tests/functional/regressions/test_bug_1843090.py b/nova/tests/functional/regressions/test_bug_1843090.py
index 8ccb83192c..ed02d59cb4 100644
--- a/nova/tests/functional/regressions/test_bug_1843090.py
+++ b/nova/tests/functional/regressions/test_bug_1843090.py
@@ -43,8 +43,9 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
claim_calls = []
def fake_orig_claim(
- _self, context, instance, instance_type, nodename,
- *args, **kwargs):
+ _self, context, instance, flavor, nodename,
+ *args, **kwargs,
+ ):
if not claim_calls:
claim_calls.append(nodename)
raise exception.ComputeResourcesUnavailable(
@@ -52,7 +53,7 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
else:
claim_calls.append(nodename)
return orig_claim(
- _self, context, instance, instance_type, nodename, *args,
+ _self, context, instance, flavor, nodename, *args,
**kwargs)
with mock.patch(
diff --git a/nova/tests/functional/regressions/test_bug_1845291.py b/nova/tests/functional/regressions/test_bug_1845291.py
index bdfa460a0c..101774416a 100644
--- a/nova/tests/functional/regressions/test_bug_1845291.py
+++ b/nova/tests/functional/regressions/test_bug_1845291.py
@@ -47,8 +47,9 @@ class ForcedHostMissingReScheduleTestCase(
claim_calls = []
def fake_orig_claim(
- _self, context, instance, instance_type, nodename,
- *args, **kwargs):
+ _self, context, instance, flavor, nodename,
+ *args, **kwargs,
+ ):
if not claim_calls:
claim_calls.append(nodename)
raise exception.ComputeResourcesUnavailable(
@@ -56,7 +57,7 @@ class ForcedHostMissingReScheduleTestCase(
else:
claim_calls.append(nodename)
return orig_claim(
- _self, context, instance, instance_type, nodename, *args,
+ _self, context, instance, flavor, nodename, *args,
**kwargs)
with mock.patch(
diff --git a/nova/tests/functional/regressions/test_bug_1928063.py b/nova/tests/functional/regressions/test_bug_1928063.py
index b3a5f969cf..b1b1d36e16 100644
--- a/nova/tests/functional/regressions/test_bug_1928063.py
+++ b/nova/tests/functional/regressions/test_bug_1928063.py
@@ -17,7 +17,6 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import test
from nova.tests.fixtures import libvirt as fakelibvirt
-from nova.tests.functional.api import client
from nova.tests.functional.libvirt import base
from nova.virt.libvirt.host import SEV_KERNEL_PARAM_FILE
@@ -58,12 +57,5 @@ class TestSEVInstanceReboot(base.ServersTestBase):
networks='none'
)
- # FIXME(lyarwood): This is bug #1928063, the instance fails to reboot
- # due to a NotImplementedError exception being raised when we try to
- # access image_meta.name as this isn't stashed in the system_metadata
- # of the instance and as a result is not provided in the image_meta
- # associated with the instance during this flow.
- ex = self.assertRaises(
- client.OpenStackApiException,
- self._reboot_server, server, hard=True)
- self.assertEqual(500, ex.response.status_code)
+ # Hard reboot the server
+ self._reboot_server(server, hard=True)
diff --git a/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova/tests/unit/api/openstack/compute/test_disk_config.py
index ccbc29187e..8f78f3f012 100644
--- a/nova/tests/unit/api/openstack/compute/test_disk_config.py
+++ b/nova/tests/unit/api/openstack/compute/test_disk_config.py
@@ -73,19 +73,18 @@ class DiskConfigTestCaseV21(test.TestCase):
def fake_instance_create(context, inst_, session=None):
inst = fake_instance.fake_db_instance(**{
- 'id': 1,
- 'uuid': AUTO_INSTANCE_UUID,
- 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- 'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- 'progress': 0,
- 'name': 'instance-1', # this is a property
- 'task_state': '',
- 'vm_state': '',
- 'auto_disk_config': inst_['auto_disk_config'],
- 'security_groups': inst_['security_groups'],
- 'instance_type': objects.Flavor.get_by_name(context,
- 'm1.small'),
- })
+ 'id': 1,
+ 'uuid': AUTO_INSTANCE_UUID,
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'progress': 0,
+ 'name': 'instance-1', # this is a property
+ 'task_state': '',
+ 'vm_state': '',
+ 'auto_disk_config': inst_['auto_disk_config'],
+ 'security_groups': inst_['security_groups'],
+ 'flavor': objects.Flavor.get_by_name(context, 'm1.small'),
+ })
return inst
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index 519bb33161..361a05e12b 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
@@ -67,9 +66,6 @@ class EvacuateTestV21(test.NoDBTestCase):
self.stub_out('nova.compute.api.API.get', fake_compute_api_get)
self.stub_out('nova.compute.api.HostAPI.service_get_by_compute_host',
fake_service_get_by_compute_host)
- self.mock_list_port = self.useFixture(
- fixtures.MockPatch('nova.network.neutron.API.list_ports')).mock
- self.mock_list_port.return_value = {'ports': []}
self.UUID = uuids.fake
for _method in self._methods:
self.stub_out('nova.compute.api.API.%s' % _method,
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 7d24f6cbcb..8c25a2efc2 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -49,16 +49,18 @@ def generate_flavor(flavorid, ispublic):
}
-INSTANCE_TYPES = {
- '0': generate_flavor(0, True),
- '1': generate_flavor(1, True),
- '2': generate_flavor(2, False),
- '3': generate_flavor(3, False)}
+FLAVORS = {
+ '0': generate_flavor(0, True),
+ '1': generate_flavor(1, True),
+ '2': generate_flavor(2, False),
+ '3': generate_flavor(3, False)}
-ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
- {'flavor_id': '2', 'project_id': 'proj3'},
- {'flavor_id': '3', 'project_id': 'proj3'}]
+ACCESS_LIST = [
+ {'flavor_id': '2', 'project_id': 'proj2'},
+ {'flavor_id': '2', 'project_id': 'proj3'},
+ {'flavor_id': '3', 'project_id': 'proj3'},
+]
def fake_get_flavor_access_by_flavor_id(context, flavorid):
@@ -70,7 +72,7 @@ def fake_get_flavor_access_by_flavor_id(context, flavorid):
def fake_get_flavor_by_flavor_id(context, flavorid):
- return INSTANCE_TYPES[flavorid]
+ return FLAVORS[flavorid]
def _has_flavor_access(flavorid, projectid):
@@ -85,10 +87,10 @@ def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if filters is None or filters['is_public'] is None:
- return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
+ return sorted(FLAVORS.values(), key=lambda item: item[sort_key])
res = {}
- for k, v in INSTANCE_TYPES.items():
+ for k, v in FLAVORS.items():
if filters['is_public'] and _has_flavor_access(k, context.project_id):
res.update({k: v})
continue
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
index d43387dd37..40c8f9661c 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -914,12 +914,12 @@ class DisabledFlavorsWithRealDBTestV21(test.TestCase):
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
- self.disabled_type = self._create_disabled_instance_type()
+ self.disabled_type = self._create_disabled_flavor()
self.addCleanup(self.disabled_type.destroy)
self.inst_types = objects.FlavorList.get_all(self.admin_context)
self.controller = self.Controller()
- def _create_disabled_instance_type(self):
+ def _create_disabled_flavor(self):
flavor = objects.Flavor(context=self.admin_context,
name='foo.disabled', flavorid='10.disabled',
memory_mb=512, vcpus=2, root_gb=1,
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 183a823fbb..3872a5196b 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -45,9 +44,6 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
self.stub_out('nova.api.openstack.compute.migrate_server.'
'MigrateServerController',
lambda *a, **kw: self.controller)
- self.mock_list_port = self.useFixture(
- fixtures.MockPatch('nova.network.neutron.API.list_ports')).mock
- self.mock_list_port.return_value = {'ports': []}
def _get_migration_body(self, **kwargs):
return {'os-migrateLive': self._get_params(**kwargs)}
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index b32d2a9003..3e581aa51b 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -14,7 +14,6 @@
# under the License.
import ddt
-import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -100,10 +99,6 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller.compute_api, 'compute_task_api')
mock_conductor.start()
self.addCleanup(mock_conductor.stop)
- # Assume that none of the tests are using ports with resource requests.
- self.mock_list_port = self.useFixture(
- fixtures.MockPatch('nova.network.neutron.API.list_ports')).mock
- self.mock_list_port.return_value = {'ports': []}
def _get_controller(self):
return self.servers.ServersController()
@@ -267,7 +262,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def _test_rebuild_preserve_ephemeral(self, value=None):
return_server = fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
- image_ref='2',
+ image_ref=uuids.image_ref,
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stub_out('nova.compute.api.API.get', return_server)
@@ -302,7 +297,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
- image_ref='2',
+ image_ref=uuids.image_ref,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.compute.api.API.get', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
@@ -316,7 +311,7 @@ class ServerActionsControllerTestV21(test.TestCase):
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
- self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['image']['id'], uuids.image_ref)
self.assertEqual(len(body['server']['adminPass']),
CONF.password_length)
@@ -361,7 +356,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
- image_ref='2',
+ image_ref=uuids.image_ref,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.compute.api.API.get', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
@@ -375,7 +370,7 @@ class ServerActionsControllerTestV21(test.TestCase):
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
- self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['image']['id'], uuids.image_ref)
self.assertNotIn("adminPass", body['server'])
self.assertEqual(robj['location'], self_href)
@@ -473,7 +468,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_rebuild_admin_pass(self):
return_server = fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
- image_ref='2',
+ image_ref=uuids.image_ref,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.compute.api.API.get', return_server)
@@ -487,7 +482,7 @@ class ServerActionsControllerTestV21(test.TestCase):
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
- self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['image']['id'], uuids.image_ref)
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
@@ -497,7 +492,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_compute_get(
project_id=fakes.FAKE_PROJECT_ID,
- image_ref='2',
+            image_ref=uuids.image_ref,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.compute.api.API.get', return_server)
@@ -511,7 +506,7 @@ class ServerActionsControllerTestV21(test.TestCase):
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
- self.assertEqual(body['server']['image']['id'], '2')
+        self.assertEqual(body['server']['image']['id'], uuids.image_ref)
self.assertNotIn('adminPass', body['server'])
def test_rebuild_server_not_found(self):
@@ -578,12 +573,20 @@ class ServerActionsControllerTestV21(test.TestCase):
def return_image_meta(*args, **kwargs):
image_meta_table = {
- '2': {'id': uuids.image_id, 'status': 'active',
- 'container_format': 'ari'},
- '155d900f-4e14-4e4c-a73d-069cbf4541e6':
- {'id': uuids.image_id, 'status': 'active',
- 'container_format': 'raw',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ uuids.image_1_id: {
+ 'id': uuids.image_1_id,
+ 'status': 'active',
+ 'container_format': 'ari'
+ },
+ uuids.image_2_id: {
+ 'id': uuids.image_2_id,
+ 'status': 'active',
+ 'container_format': 'raw',
+ 'properties': {
+ 'kernel_id': uuids.kernel_id,
+ 'ramdisk_id': uuids.ramdisk_id
+ }
+ },
}
image_id = args[2]
try:
@@ -597,7 +600,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_image_meta)
body = {
"rebuild": {
- "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "imageRef": uuids.image_2_id,
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index 7e90e2ff0e..397adec18a 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -410,7 +410,7 @@ class ServersControllerTest(ControllerTest):
"status": status,
"hostId": '',
"image": {
- "id": "10",
+ "id": FAKE_UUID,
"links": [
{
"rel": "bookmark",
@@ -474,7 +474,8 @@ class ServersControllerTest(ControllerTest):
}
def test_get_server_by_id(self):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
uuid = FAKE_UUID
@@ -499,7 +500,8 @@ class ServersControllerTest(ControllerTest):
self.assertEqual(res_dict['server']['OS-EXT-AZ:availability_zone'], '')
def test_get_server_with_active_status_by_id(self):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
res_dict = self.controller.show(self.request, FAKE_UUID)
@@ -517,7 +519,8 @@ class ServersControllerTest(ControllerTest):
'numa_topology'], cell_down_support=False)
def test_get_server_with_id_image_ref_by_id(self):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
res_dict = self.controller.show(self.request, FAKE_UUID)
@@ -1710,12 +1713,12 @@ class ServersControllerTest(ControllerTest):
],
}
expected_image = {
- "id": "10",
+ "id": FAKE_UUID,
"links": [
{
"rel": "bookmark",
- "href": ('http://localhost/%s/images/10' %
- self.project_id),
+ "href": ('http://localhost/%s/images/%s' % (
+ self.project_id, FAKE_UUID)),
},
],
}
@@ -1838,7 +1841,8 @@ class ServersControllerTestV23(ServersControllerTest):
return server_dict
def test_show(self):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
res_dict = self.controller.show(self.request, FAKE_UUID)
@@ -1880,7 +1884,8 @@ class ServersControllerTestV23(ServersControllerTest):
req.environ['nova.context'])
servers_list = self.controller.detail(req)
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
@@ -1941,7 +1946,8 @@ class ServersControllerTestV29(ServersControllerTest):
return server_dict
def _test_get_server_with_lock(self, locked_by):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
project_id = req.environ['nova.context'].project_id
@@ -2121,7 +2127,8 @@ class ServersControllerTestV216(ServersControllerTest):
policy.set_rules(orig_rules)
def test_show(self):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
res_dict = self.controller.show(self.request, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
@@ -2166,7 +2173,8 @@ class ServersControllerTestV216(ServersControllerTest):
servers_list = self.controller.detail(req)
self.assertEqual(2, len(servers_list['servers']))
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
@@ -2237,7 +2245,8 @@ class ServersControllerTestV219(ServersControllerTest):
return server_dict
def _test_get_server_with_description(self, description):
- image_bookmark = "http://localhost/%s/images/10" % self.project_id
+ image_bookmark = "http://localhost/%s/images/%s" % (
+ self.project_id, FAKE_UUID)
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
project_id = req.environ['nova.context'].project_id
@@ -4199,7 +4208,7 @@ class ServersControllerCreateTest(test.TestCase):
self.controller = servers.ServersController()
def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
+ flavor = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/%s/images/%s' % (self.project_id,
image_uuid)
@@ -4209,7 +4218,7 @@ class ServersControllerCreateTest(test.TestCase):
'display_name': inst['display_name'] or 'test',
'display_description': inst['display_description'] or '',
'uuid': FAKE_UUID,
- 'instance_type': inst_type,
+ 'flavor': flavor,
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': fakes.FAKE_PROJECT_ID,
@@ -7185,7 +7194,7 @@ class ServersViewBuilderTest(test.TestCase):
'ips': [_ip(fixed_ipv4[2])]}]}}]
return nw_cache
- def test_get_flavor_valid_instance_type(self):
+ def test_get_flavor_valid_flavor(self):
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected = {"id": "1",
"links": [{"rel": "bookmark",
diff --git a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
index 111bd68077..fa451f5a18 100644
--- a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
+++ b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
@@ -78,7 +78,7 @@ def _fake_instance(start, end, instance_id, tenant_id,
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
- instance_type_id=FAKE_INST_TYPE['id'],
+ instance_type_id=flavor.id,
launched_at=start,
terminated_at=end,
vm_state=vm_state,
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index c24de043e5..8a2ef8010d 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -126,18 +126,18 @@ class BootFromVolumeTest(test.TestCase):
self._legacy_bdm_seen = True
def _get_fake_compute_api_create(self):
- def _fake_compute_api_create(cls, context, instance_type,
+ def _fake_compute_api_create(cls, context, flavor,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
- inst_type = flavors.get_flavor_by_flavor_id(2)
+ flavor = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
- 'instance_type': inst_type,
+ 'flavor': flavor,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 6c50bbff72..71a4abf1a9 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -428,7 +428,7 @@ def fake_compute_get_all(num_servers=5, **kwargs):
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
- reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ reservation_id="", uuid=FAKE_UUID, image_ref=FAKE_UUID,
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
@@ -441,7 +441,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
- instance_type=None, launch_index=0, kernel_id="",
+ flavor=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None, system_metadata=None,
services=None, trusted_certs=None, hidden=False):
if user_id is None:
@@ -481,11 +481,11 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
info_cache = create_info_cache(nw_cache)
- if instance_type is None:
- instance_type = objects.Flavor.get_by_name(
+ if flavor is None:
+ flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
flavorinfo = jsonutils.dumps({
- 'cur': instance_type.obj_to_primitive(),
+ 'cur': flavor.obj_to_primitive(),
'old': None,
'new': None,
})
@@ -501,62 +501,60 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
+ "hostname": display_name or server_name,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
- "config_drive": config_drive,
+ "power_state": power_state,
"vm_state": vm_state or vm_states.ACTIVE,
"task_state": task_state,
- "power_state": power_state,
+ "services": services,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
- "hostname": display_name or server_name,
"host": host,
"node": node,
- "instance_type_id": 1,
- "instance_type": inst_type,
+ "instance_type_id": flavor.id,
"user_data": user_data,
"reservation_id": reservation_id,
- "mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": display_description,
+ "launched_on": "",
"locked": locked_by is not None,
"locked_by": locked_by,
- "metadata": metadata,
+ "os_type": "",
+ "architecture": "",
+ "vm_mode": "",
+ "uuid": uuid,
+ "root_device_name": root_device_name,
+ "default_ephemeral_device": "",
+ "default_swap_device": "",
+ "config_drive": config_drive,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
- "uuid": uuid,
- "progress": progress,
"auto_disk_config": auto_disk_config,
- "name": "instance-%s" % id,
+ "progress": progress,
"shutdown_terminate": True,
"disable_terminate": False,
- "security_groups": security_groups,
- "root_device_name": root_device_name,
+ "cell_name": "",
+ "metadata": metadata,
"system_metadata": utils.dict_to_metadata(sys_meta),
+ "security_groups": security_groups,
+ "cleaned": cleaned,
"pci_devices": [],
- "vm_mode": "",
- "default_swap_device": "",
- "default_ephemeral_device": "",
- "launched_on": "",
- "cell_name": "",
- "architecture": "",
- "os_type": "",
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
"trusted_certs": trusted_certs,
},
- "cleaned": cleaned,
- "services": services,
"tags": [],
"hidden": hidden,
+ "name": "instance-%s" % id,
}
instance.update(info_cache)
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index 7f38b48d3a..49ea322856 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -69,7 +69,6 @@ from nova.volume import cinder
CONF = nova.conf.CONF
-FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
SHELVED_IMAGE = 'fake-shelved-image'
SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
@@ -147,13 +146,16 @@ class _ComputeAPIUnitTestMixIn(object):
instance.uuid = uuidutils.generate_uuid()
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
- instance.image_ref = FAKE_IMAGE_REF
+ instance.image_ref = uuids.image_ref
instance.reservation_id = 'r-fakeres'
instance.user_id = self.user_id
instance.project_id = self.project_id
instance.host = 'fake_host'
instance.node = NODENAME
instance.instance_type_id = flavor.id
+ instance.flavor = flavor
+ instance.old_flavor = None
+ instance.new_flavor = None
instance.ami_launch_index = 0
instance.memory_mb = 0
instance.vcpus = 0
@@ -168,8 +170,6 @@ class _ComputeAPIUnitTestMixIn(object):
instance.disable_terminate = False
instance.info_cache = objects.InstanceInfoCache()
instance.info_cache.network_info = model.NetworkInfo()
- instance.flavor = flavor
- instance.old_flavor = instance.new_flavor = None
instance.numa_topology = None
if params:
@@ -215,7 +215,7 @@ class _ComputeAPIUnitTestMixIn(object):
get_image.return_value = (None, {})
check_requested_networks.return_value = 1
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
@@ -226,7 +226,7 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.network_api,
'create_resource_requests',
return_value=(None, [])):
- self.compute_api.create(self.context, instance_type, 'image_id',
+ self.compute_api.create(self.context, flavor, 'image_id',
requested_networks=requested_networks,
max_count=None)
@@ -239,7 +239,7 @@ class _ComputeAPIUnitTestMixIn(object):
mock_limit_check, mock_count):
image_href = "image_href"
image_id = 0
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
quotas = {'instances': 1, 'cores': 1, 'ram': 1}
quota_exception = exception.OverQuota(quotas=quotas,
@@ -259,7 +259,7 @@ class _ComputeAPIUnitTestMixIn(object):
return_value=(image_id, {})) as mock_get_image:
for min_count, message in [(20, '20-40'), (40, '40')]:
try:
- self.compute_api.create(self.context, instance_type,
+ self.compute_api.create(self.context, flavor,
"image_href", min_count=min_count,
max_count=40)
except exception.TooManyInstances as e:
@@ -277,7 +277,7 @@ class _ComputeAPIUnitTestMixIn(object):
# creating a volume-backed instance
self.assertRaises(exception.CertificateValidationFailed,
self.compute_api.create, self.context,
- instance_type=self._create_flavor(), image_href=None,
+ flavor=self._create_flavor(), image_href=None,
trusted_certs=['test-cert-1', 'test-cert-2'])
@mock.patch('nova.objects.Quotas.limit_check')
@@ -290,7 +290,7 @@ class _ComputeAPIUnitTestMixIn(object):
# creating a volume-backed instance
self.assertRaises(exception.CertificateValidationFailed,
self.compute_api.create, self.context,
- instance_type=self._create_flavor(),
+ flavor=self._create_flavor(),
image_href=None)
def _test_create_max_net_count(self, max_net_count, min_count, max_count):
@@ -3762,7 +3762,7 @@ class _ComputeAPIUnitTestMixIn(object):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
- system_metadata={}, image_ref='foo',
+ system_metadata={}, image_ref=uuids.image_ref,
expected_attrs=['system_metadata'])
image_id = self._setup_fake_image_with_invalid_arch()
self.assertRaises(exception.InvalidArchitectureName,
@@ -3793,7 +3793,7 @@ class _ComputeAPIUnitTestMixIn(object):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
- system_metadata={}, image_ref='foo',
+ system_metadata={}, image_ref=uuids.image_ref,
expected_attrs=['system_metadata'])
bdms = objects.BlockDeviceMappingList(objects=[
@@ -3807,8 +3807,8 @@ class _ComputeAPIUnitTestMixIn(object):
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {
'architecture': fields_obj.Architecture.X86_64}}
@@ -3824,7 +3824,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.compute_api.rebuild,
self.context,
instance,
- image_href,
+ uuids.image_ref,
"new password")
self.assertIsNone(instance.task_state)
mock_get_bdms.assert_called_once_with(self.context,
@@ -3854,12 +3854,12 @@ class _ComputeAPIUnitTestMixIn(object):
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
- image_ref='foo',
+ image_ref=uuids.image_ref,
expected_attrs=['system_metadata'])
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {
'architecture': fields_obj.Architecture.X86_64}}
@@ -3875,13 +3875,14 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, image_href,
- admin_pass, files_to_inject)
+ self.compute_api.rebuild(
+ self.context, instance, uuids.image_ref,
+ admin_pass, files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=image_href,
- orig_image_ref=image_href,
+ injected_files=files_to_inject, image_ref=uuids.image_ref,
+ orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
request_spec=fake_spec)
@@ -3909,13 +3910,13 @@ class _ComputeAPIUnitTestMixIn(object):
req_spec_get_by_inst_uuid, req_spec_save):
orig_system_metadata = {}
get_flavor.return_value = test_flavor.fake_flavor
- orig_image_href = 'orig_image'
orig_image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'hvm'}}
- new_image_href = 'new_image'
new_image = {
+ "id": uuids.new_image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'xen'}}
@@ -3928,15 +3929,15 @@ class _ComputeAPIUnitTestMixIn(object):
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'],
- image_ref=orig_image_href,
+ image_ref=uuids.image_ref,
node='node',
vm_mode=fields_obj.VMMode.HVM)
flavor = instance.get_flavor()
def get_image(context, image_href):
- if image_href == new_image_href:
+ if image_href == uuids.new_image_ref:
return (None, new_image)
- if image_href == orig_image_href:
+ if image_href == uuids.image_ref:
return (None, orig_image)
_get_image.side_effect = get_image
bdm_get_by_instance_uuid.return_value = bdms
@@ -3946,13 +3947,15 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, new_image_href,
- admin_pass, files_to_inject)
+ self.compute_api.rebuild(
+ self.context, instance, uuids.new_image_ref, admin_pass,
+ files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=new_image_href,
- orig_image_ref=orig_image_href,
+ injected_files=files_to_inject,
+ image_ref=uuids.new_image_ref,
+ orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
request_spec=fake_spec)
@@ -3989,14 +3992,14 @@ class _ComputeAPIUnitTestMixIn(object):
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
- image_ref='foo',
+ image_ref=uuids.image_ref,
expected_attrs=['system_metadata'],
key_name=orig_key_name,
key_data=orig_key_data)
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'hvm'}}
@@ -4014,13 +4017,13 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_keypair.return_value = keypair
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, image_href,
+ self.compute_api.rebuild(self.context, instance, uuids.image_ref,
admin_pass, files_to_inject, key_name=keypair.name)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=image_href,
- orig_image_ref=image_href,
+ injected_files=files_to_inject, image_ref=uuids.image_ref,
+ orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
request_spec=fake_spec)
@@ -4050,13 +4053,13 @@ class _ComputeAPIUnitTestMixIn(object):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata, image_ref='foo',
+ system_metadata=orig_system_metadata, image_ref=uuids.image_ref,
expected_attrs=['system_metadata'],
trusted_certs=orig_trusted_certs)
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'hvm'}}
@@ -4072,14 +4075,14 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, image_href,
+ self.compute_api.rebuild(self.context, instance, uuids.image_ref,
admin_pass, files_to_inject,
trusted_certs=new_trusted_certs)
rebuild_instance.assert_called_once_with(
self.context, instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=image_href,
- orig_image_ref=image_href,
+ injected_files=files_to_inject, image_ref=uuids.image_ref,
+ orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
request_spec=fake_spec)
@@ -4114,13 +4117,13 @@ class _ComputeAPIUnitTestMixIn(object):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata, image_ref='foo',
+ system_metadata=orig_system_metadata, image_ref=uuids.image_ref,
expected_attrs=['system_metadata'],
trusted_certs=orig_trusted_certs)
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'hvm'}}
@@ -4136,14 +4139,14 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, image_href,
- admin_pass, files_to_inject,
- trusted_certs=new_trusted_certs)
+ self.compute_api.rebuild(
+ self.context, instance, uuids.image_ref, admin_pass,
+ files_to_inject, trusted_certs=new_trusted_certs)
rebuild_instance.assert_called_once_with(
self.context, instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=image_href,
- orig_image_ref=image_href,
+ injected_files=files_to_inject, image_ref=uuids.image_ref,
+ orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
request_spec=fake_spec)
@@ -4172,8 +4175,8 @@ class _ComputeAPIUnitTestMixIn(object):
system_metadata=orig_system_metadata, image_ref=None,
expected_attrs=['system_metadata'], trusted_certs=None)
get_flavor.return_value = test_flavor.fake_flavor
- image_href = 'foo'
image = {
+ "id": uuids.image_ref,
"min_ram": 10, "min_disk": 1,
"properties": {'architecture': fields_obj.Architecture.X86_64,
'vm_mode': 'hvm'}}
@@ -4186,7 +4189,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertRaises(exception.CertificateValidationFailed,
self.compute_api.rebuild, self.context, instance,
- image_href, admin_pass, files_to_inject,
+ uuids.image_ref, admin_pass, files_to_inject,
trusted_certs=new_trusted_certs)
_check_auto_disk_config.assert_called_once_with(
@@ -4671,7 +4674,7 @@ class _ComputeAPIUnitTestMixIn(object):
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_validate_bdm_with_cinder_down(self, mock_get_snapshot):
instance = self._create_instance_obj()
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
@@ -4686,7 +4689,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
- instance, instance_type, bdms, image_cache, volumes)
+ instance, flavor, bdms, image_cache, volumes)
@mock.patch.object(cinder.API, 'attachment_create',
side_effect=exception.InvalidInput(reason='error'))
@@ -4696,7 +4699,7 @@ class _ComputeAPIUnitTestMixIn(object):
# 'available' results in _validate_bdm re-raising InvalidVolume.
instance = self._create_instance_obj()
del instance.id
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
volume_info = {'status': 'error',
'attach_status': 'detached',
@@ -4714,7 +4717,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context,
- instance, instance_type, bdms, {},
+ instance, flavor, bdms, {},
{volume_id: volume_info})
mock_attach_create.assert_called_once_with(
@@ -4737,7 +4740,7 @@ class _ComputeAPIUnitTestMixIn(object):
"""Test _check_requested_volume_type method is used.
"""
instance = self._create_instance_obj()
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
volume_type = 'fake_lvm_1'
volume_types = [{'id': 'fake_volume_type_id_1', 'name': 'fake_lvm_1'},
@@ -4776,7 +4779,7 @@ class _ComputeAPIUnitTestMixIn(object):
image_cache = volumes = {}
self.compute_api._validate_bdm(self.context, instance,
- instance_type, bdms, image_cache,
+ flavor, bdms, image_cache,
volumes)
get_all_vol_types.assert_called_once_with(self.context)
@@ -4994,7 +4997,7 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance')
@mock.patch('nova.objects.InstanceMapping.create')
def _test_provision_instances_with_accels(self,
- instance_type, dp_request_groups, prev_request_groups,
+ flavor, dp_request_groups, prev_request_groups,
mock_im, mock_instance, mock_br, mock_rs, mock_get_dp):
@mock.patch.object(self.compute_api, '_get_volumes_for_bdms')
@@ -5011,7 +5014,7 @@ class _ComputeAPIUnitTestMixIn(object):
def do_test(mock_bdm_v, mock_sg, mock_cniq, mock_get_vols):
mock_cniq.return_value = 1
self.compute_api._provision_instances(self.context,
- instance_type,
+ flavor,
1, 1, mock.MagicMock(),
{}, None,
None, None, None, {}, None,
@@ -5031,7 +5034,7 @@ class _ComputeAPIUnitTestMixIn(object):
# should be obtained, and added to reqspec's requested_resources.
dp_name = 'mydp'
extra_specs = {'extra_specs': {'accel:device_profile': dp_name}}
- instance_type = self._create_flavor(**extra_specs)
+ flavor = self._create_flavor(**extra_specs)
prev_groups = [objects.RequestGroup(requester_id='prev0'),
objects.RequestGroup(requester_id='prev1')]
@@ -5039,7 +5042,7 @@ class _ComputeAPIUnitTestMixIn(object):
objects.RequestGroup(requester_id='deviceprofile3')]
mock_get_dp, fake_rs = self._test_provision_instances_with_accels(
- instance_type, dp_groups, prev_groups)
+ flavor, dp_groups, prev_groups)
mock_get_dp.assert_called_once_with(self.context, dp_name)
self.assertEqual(prev_groups + dp_groups, fake_rs.requested_resources)
@@ -5047,11 +5050,11 @@ class _ComputeAPIUnitTestMixIn(object):
# If extra specs has no accel spec, no attempt should be made to
# get device profile's request_groups, and reqspec.requested_resources
# should be left unchanged.
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
prev_groups = [objects.RequestGroup(requester_id='prev0'),
objects.RequestGroup(requester_id='prev1')]
mock_get_dp, fake_rs = self._test_provision_instances_with_accels(
- instance_type, [], prev_groups)
+ flavor, [], prev_groups)
mock_get_dp.assert_not_called()
self.assertEqual(prev_groups, fake_rs.requested_resources)
@@ -5697,7 +5700,7 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_detach_interface_invalid_state(state)
def _test_check_and_transform_bdm(self, block_device_mapping):
- instance_type = self._create_flavor()
+ flavor = self._create_flavor()
base_options = {'uuid': uuids.bdm_instance,
'image_ref': 'fake_image_ref',
'metadata': {}}
@@ -5710,7 +5713,7 @@ class _ComputeAPIUnitTestMixIn(object):
block_device_mapping = block_device_mapping
self.assertRaises(exception.InvalidRequest,
self.compute_api._check_and_transform_bdm,
- self.context, base_options, instance_type,
+ self.context, base_options, flavor,
image_meta, 1, 1, block_device_mapping, legacy_bdm)
def test_check_and_transform_bdm_source_volume(self):
@@ -5741,8 +5744,8 @@ class _ComputeAPIUnitTestMixIn(object):
swap_size = 42
ephemeral_size = 24
instance = self._create_instance_obj()
- instance_type = self._create_flavor(swap=swap_size,
- ephemeral_gb=ephemeral_size)
+ flavor = self._create_flavor(
+ swap=swap_size, ephemeral_gb=ephemeral_size)
block_device_mapping = [
{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
@@ -5765,7 +5768,7 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api, '_validate_bdm'):
image_cache = volumes = {}
bdms = self.compute_api._bdm_validate_set_size_and_instance(
- self.context, instance, instance_type, block_device_mapping,
+ self.context, instance, flavor, block_device_mapping,
image_cache, volumes)
expected = [{'device_name': '/dev/sda1',
@@ -7126,7 +7129,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
requested Neutron security group and that will be returned from
_validate_and_build_base_options
"""
- instance_type = objects.Flavor(**test_flavor.fake_flavor)
+ flavor = objects.Flavor(**test_flavor.fake_flavor)
boot_meta = metadata = {}
kernel_id = ramdisk_id = key_name = key_data = user_data = \
access_ip_v4 = access_ip_v6 = config_drive = \
@@ -7145,7 +7148,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
base_options, max_network_count, key_pair, security_groups, \
network_metadata = (
self.compute_api._validate_and_build_base_options(
- self.context, instance_type, boot_meta, uuids.image_href,
+ self.context, flavor, boot_meta, uuids.image_href,
mock.sentinel.image_id, kernel_id, ramdisk_id,
'fake-display-name', 'fake-description', key_name,
key_data, requested_secgroups, 'fake-az', user_data,
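The test_api.py hunks above all exercise the renamed keyword on the public create() path. A minimal sketch of the new call shape, assuming a configured compute API and request context like the fixtures these tests build; the image UUID and counts are illustrative only:

    # The parameter formerly named instance_type is now passed as flavor;
    # everything else about the create() call is unchanged.
    compute_api.create(
        ctxt,
        flavor=flavor,               # an objects.Flavor, e.g. m1.small
        image_href=uuids.image_ref,  # a Glance image UUID
        min_count=1,
        max_count=1)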
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 85e5ed40d1..abbf728f92 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -79,7 +79,7 @@ class ClaimTestCase(test.NoDBTestCase):
def _claim(self, limits=None, requests=None, **kwargs):
numa_topology = kwargs.pop('numa_topology', None)
instance = self._fake_instance(**kwargs)
- instance.flavor = self._fake_instance_type(**kwargs)
+ instance.flavor = self._fake_flavor(**kwargs)
if numa_topology:
db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
@@ -114,8 +114,8 @@ class ClaimTestCase(test.NoDBTestCase):
instance.update(**kwargs)
return fake_instance.fake_instance_obj(self.context, **instance)
- def _fake_instance_type(self, **kwargs):
- instance_type = {
+ def _fake_flavor(self, **kwargs):
+ flavor = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1024,
@@ -123,8 +123,8 @@ class ClaimTestCase(test.NoDBTestCase):
'root_gb': 10,
'ephemeral_gb': 5
}
- instance_type.update(**kwargs)
- return objects.Flavor(**instance_type)
+ flavor.update(**kwargs)
+ return objects.Flavor(**flavor)
def _fake_compute_node(self, values=None):
compute_node = {
@@ -323,7 +323,7 @@ class MoveClaimTestCase(ClaimTestCase):
def _claim(self, limits=None, requests=None,
image_meta=None, **kwargs):
- instance_type = self._fake_instance_type(**kwargs)
+ flavor = self._fake_flavor(**kwargs)
numa_topology = kwargs.pop('numa_topology', None)
image_meta = image_meta or {}
self.instance = self._fake_instance(**kwargs)
@@ -347,7 +347,7 @@ class MoveClaimTestCase(ClaimTestCase):
return_value=self.db_numa_topology)
def get_claim(mock_extra_get, mock_numa_get):
return claims.MoveClaim(
- self.context, self.instance, _NODENAME, instance_type,
+ self.context, self.instance, _NODENAME, flavor,
image_meta, self.tracker, self.compute_node, requests,
objects.Migration(migration_type='migration'), limits=limits)
return get_claim()
@@ -371,20 +371,20 @@ class MoveClaimTestCase(ClaimTestCase):
class LiveMigrationClaimTestCase(ClaimTestCase):
def test_live_migration_claim_bad_pci_request(self):
- instance_type = self._fake_instance_type()
+ flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = None
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
'PCI requests are not supported',
- claims.MoveClaim, self.context, instance, _NODENAME, instance_type,
+ claims.MoveClaim, self.context, instance, _NODENAME, flavor,
{}, self.tracker, self.compute_node,
objects.InstancePCIRequests(requests=[
objects.InstancePCIRequest(alias_name='fake-alias')]),
objects.Migration(migration_type='live-migration'), None)
def test_live_migration_page_size(self):
- instance_type = self._fake_instance_type()
+ flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
@@ -399,12 +399,12 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
exception.ComputeResourcesUnavailable,
'Requested page size is different',
claims.MoveClaim, self.context, instance, _NODENAME,
- instance_type, {}, self.tracker, self.compute_node,
+ flavor, {}, self.tracker, self.compute_node,
self.empty_requests,
objects.Migration(migration_type='live-migration'), None)
def test_claim_fails_page_size_not_called(self):
- instance_type = self._fake_instance_type()
+ flavor = self._fake_flavor()
instance = self._fake_instance()
# This topology cannot fit in self.compute_node
# (see _fake_compute_node())
@@ -422,16 +422,16 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
exception.ComputeResourcesUnavailable,
'Requested instance NUMA topology',
claims.MoveClaim, self.context, instance, _NODENAME,
- instance_type, {}, self.tracker, self.compute_node,
+ flavor, {}, self.tracker, self.compute_node,
self.empty_requests,
objects.Migration(migration_type='live-migration'), None)
mock_test_page_size.assert_not_called()
def test_live_migration_no_instance_numa_topology(self):
- instance_type = self._fake_instance_type()
+ flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = None
claims.MoveClaim(
- self.context, instance, _NODENAME, instance_type, {}, self.tracker,
+ self.context, instance, _NODENAME, flavor, {}, self.tracker,
self.compute_node, self.empty_requests,
objects.Migration(migration_type='live-migration'), None)
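The claim tests above pin down the positional signature MoveClaim is now constructed with. A sketch of that construction, assuming the same fake instance, tracker, compute node and empty PCI request list the test case sets up (names mirror the test-case attributes):

    # Positional order exercised above: context, instance, nodename,
    # flavor, image_meta, tracker, compute_node, pci_requests, then the
    # Migration object and an explicit limits argument.
    claim = claims.MoveClaim(
        ctxt, instance, _NODENAME, flavor, {}, tracker, compute_node,
        empty_requests,
        objects.Migration(migration_type='live-migration'), None)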
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 0487e79632..32480e349b 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -944,7 +944,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def test_prepare_image_mapping(self):
swap_size = 1
ephemeral_size = 1
- instance_type = {'swap': swap_size,
+ flavor = {'swap': swap_size,
'ephemeral_gb': ephemeral_size}
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
@@ -957,7 +957,7 @@ class ComputeVolumeTestCase(BaseTestCase):
]
preped_bdm = self.compute_api._prepare_image_mapping(
- instance_type, mappings)
+ flavor, mappings)
expected_result = [
{
@@ -1010,7 +1010,7 @@ class ComputeVolumeTestCase(BaseTestCase):
image_id = '77777777-aaaa-bbbb-cccc-555555555555'
instance = self._create_fake_instance_obj()
- instance_type = {'swap': 1, 'ephemeral_gb': 2}
+ flavor = {'swap': 1, 'ephemeral_gb': 2}
mappings = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sdb4',
@@ -1058,7 +1058,7 @@ class ComputeVolumeTestCase(BaseTestCase):
volume_id: fake_get(None, None, volume_id)
}
self.compute_api._validate_bdm(self.context, instance,
- instance_type, mappings, {},
+ flavor, mappings, {},
volumes)
self.assertEqual(4, mappings[1].volume_size)
self.assertEqual(6, mappings[2].volume_size)
@@ -1067,7 +1067,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings[2].boot_index = 2
self.assertRaises(exception.InvalidBDMBootSequence,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings, {}, volumes)
mappings[2].boot_index = 0
@@ -1075,7 +1075,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(max_local_block_devices=1)
self.assertRaises(exception.InvalidBDMLocalsLimit,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings, {}, volumes)
ephemerals = [
fake_block_device.FakeDbBlockDeviceDict({
@@ -1105,7 +1105,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_ = mappings[:]
mappings_.objects.extend(ephemerals)
self.compute_api._validate_bdm(self.context, instance,
- instance_type, mappings_, {},
+ flavor, mappings_, {},
volumes)
# Ephemerals over the size limit
@@ -1114,14 +1114,14 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(ephemerals)
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings_, {}, volumes)
# Swap over the size limit
mappings[0].volume_size = 3
self.assertRaises(exception.InvalidBDMSwapSize,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings, {}, volumes)
mappings[0].volume_size = 1
@@ -1144,7 +1144,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(additional_swap)
self.assertRaises(exception.InvalidBDMFormat,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings_, {}, volumes)
image_no_size = [
@@ -1163,7 +1163,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(image_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings_, {}, volumes)
# blank device without a specified size fails
@@ -1182,11 +1182,11 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(blank_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
- self.context, instance, instance_type,
+ self.context, instance, flavor,
mappings_, {}, volumes)
def test_validate_bdm_with_more_than_one_default(self):
- instance_type = {'swap': 1, 'ephemeral_gb': 1}
+ flavor = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@@ -1217,13 +1217,13 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, self.instance,
- instance_type, all_mappings, image_cache, volumes)
+ flavor, all_mappings, image_cache, volumes)
@mock.patch.object(cinder.API, 'attachment_create',
side_effect=exception.InvalidVolume(reason='error'))
def test_validate_bdm_media_service_invalid_volume(self, mock_att_create):
volume_id = uuids.volume_id
- instance_type = {'swap': 1, 'ephemeral_gb': 1}
+ flavor = {'swap': 1, 'ephemeral_gb': 1}
bdms = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@@ -1263,7 +1263,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance_object,
- instance_type, bdms, {}, volumes)
+ flavor, bdms, {}, volumes)
@mock.patch.object(cinder.API, 'check_availability_zone')
@mock.patch.object(cinder.API, 'attachment_create',
@@ -1271,7 +1271,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def test_validate_bdm_media_service_valid(self, mock_att_create,
mock_check_av_zone):
volume_id = uuids.volume_id
- instance_type = {'swap': 1, 'ephemeral_gb': 1}
+ flavor = {'swap': 1, 'ephemeral_gb': 1}
bdms = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@@ -1292,7 +1292,7 @@ class ComputeVolumeTestCase(BaseTestCase):
image_cache = {}
volumes = {volume_id: volume}
self.compute_api._validate_bdm(self.context, self.instance_object,
- instance_type, bdms, image_cache,
+ flavor, bdms, image_cache,
volumes)
mock_check_av_zone.assert_not_called()
mock_att_create.assert_called_once_with(
@@ -1620,7 +1620,7 @@ class ComputeTestCase(BaseTestCase,
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create,
self.context,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
image_href=None,
max_count=2,
requested_networks=requested_networks)
@@ -2930,13 +2930,13 @@ class ComputeTestCase(BaseTestCase,
power_state=10003,
vm_state=vm_states.ACTIVE,
task_state=expected_task,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
launched_at=timeutils.utcnow()))
updated_dbinstance2 = fake_instance.fake_db_instance(
**dict(uuid=uuids.db_instance_2,
power_state=10003,
vm_state=vm_states.ACTIVE,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
task_state=expected_task,
launched_at=timeutils.utcnow()))
@@ -4551,7 +4551,7 @@ class ComputeTestCase(BaseTestCase,
migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
migration.uuid = uuids.migration_uuid
migration.new_instance_type_id = '1'
- instance_type = objects.Flavor()
+ flavor = objects.Flavor()
actions = [
("reboot_instance", task_states.REBOOTING,
@@ -4593,7 +4593,7 @@ class ComputeTestCase(BaseTestCase,
'request_spec': {}}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
- 'flavor': instance_type,
+ 'flavor': flavor,
'request_spec': {},
'filter_properties': {},
'node': None,
@@ -4683,18 +4683,18 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj(params)
image = {}
disk_info = 'fake-disk-info'
- instance_type = self.default_flavor
+ flavor = self.default_flavor
if not resize_instance:
- old_instance_type = self.tiny_flavor
- instance_type['root_gb'] = old_instance_type['root_gb']
- instance_type['swap'] = old_instance_type['swap']
- instance_type['ephemeral_gb'] = old_instance_type['ephemeral_gb']
+ old_flavor = self.tiny_flavor
+ flavor['root_gb'] = old_flavor['root_gb']
+ flavor['swap'] = old_flavor['swap']
+ flavor['ephemeral_gb'] = old_flavor['ephemeral_gb']
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
- flavor=instance_type,
+ flavor=flavor,
image={}, request_spec={},
filter_properties={}, node=None,
migration=None, clean_shutdown=True,
@@ -4762,7 +4762,7 @@ class ComputeTestCase(BaseTestCase,
def _instance_save0(expected_task_state=None):
self.assertEqual(task_states.RESIZE_MIGRATED,
expected_task_state)
- self.assertEqual(instance_type['id'],
+ self.assertEqual(flavor['id'],
instance.instance_type_id)
self.assertEqual(task_states.RESIZE_FINISH,
instance.task_state)
@@ -4932,11 +4932,11 @@ class ComputeTestCase(BaseTestCase,
jsonutils.dumps(connection_info))
# begin resize
- instance_type = self.default_flavor
+ flavor = self.default_flavor
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
- flavor=instance_type,
+ flavor=flavor,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@@ -4953,7 +4953,8 @@ class ComputeTestCase(BaseTestCase,
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
- flavor=jsonutils.to_primitive(instance_type),
+ # TODO(stephenfin): Why a JSON string?
+ flavor=jsonutils.to_primitive(flavor),
clean_shutdown=True, request_spec=request_spec)
# assert bdm is unchanged
@@ -5020,12 +5021,12 @@ class ComputeTestCase(BaseTestCase,
old_flavor_name = 'm1.tiny'
instance = self._create_fake_instance_obj(type_name=old_flavor_name)
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
request_spec = objects.RequestSpec()
self.compute.prep_resize(self.context, instance=instance,
- flavor=instance_type,
+ flavor=flavor,
image={},
request_spec=request_spec,
filter_properties={},
@@ -5053,7 +5054,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(old_flavor['root_gb'], instance.root_gb)
self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
self.assertEqual(old_flavor['id'], instance.instance_type_id)
- self.assertNotEqual(instance_type['id'], instance.instance_type_id)
+ self.assertNotEqual(flavor['id'], instance.instance_type_id)
def test_set_instance_info(self):
old_flavor_name = 'm1.tiny'
@@ -5482,10 +5483,10 @@ class ComputeTestCase(BaseTestCase,
instance.numa_topology = numa_topology
instance.save()
- new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
+ new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None, clean_shutdown=True,
migration=None, host_list=None)
@@ -5493,7 +5494,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage should increase after the resize as well
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
- new_instance_type_ref.memory_mb)
+ new_flavor_ref.memory_mb)
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
@@ -5512,7 +5513,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@@ -5523,7 +5524,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage shouldn't have changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
- new_instance_type_ref.memory_mb)
+ new_flavor_ref.memory_mb)
# Prove that the instance size is now the new size
flavor = objects.Flavor.get_by_id(self.context,
@@ -5548,7 +5549,7 @@ class ComputeTestCase(BaseTestCase,
# Resources from the migration (based on initial flavor) should
# be freed now
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
- memory_mb_used + new_instance_type_ref.memory_mb)
+ memory_mb_used + new_flavor_ref.memory_mb)
mock_notify.assert_has_calls([
mock.call(self.context, instance,
@@ -5819,10 +5820,10 @@ class ComputeTestCase(BaseTestCase,
instance.numa_topology = numa_topology
instance.save()
- new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
+ new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
migration=None, clean_shutdown=True, host_list=[])
@@ -5830,7 +5831,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage should increase after the resize as well
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
- new_instance_type_ref.memory_mb)
+ new_flavor_ref.memory_mb)
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
@@ -5848,7 +5849,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@@ -5859,11 +5860,11 @@ class ComputeTestCase(BaseTestCase,
# Memory usage shouldn't have changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
- new_instance_type_ref.memory_mb)
+ new_flavor_ref.memory_mb)
# Prove that the instance size is now the new size
- instance_type_ref = flavors.get_flavor_by_flavor_id(3)
- self.assertEqual(instance_type_ref['flavorid'], '3')
+ flavor_ref = flavors.get_flavor_by_flavor_id(3)
+ self.assertEqual(flavor_ref['flavorid'], '3')
# Prove that the NUMA topology has also been updated to that of the new
# flavor - meaning None
self.assertIsNone(instance.numa_topology)
@@ -5955,10 +5956,10 @@ class ComputeTestCase(BaseTestCase,
request_spec, {},
[], block_device_mapping=[])
- new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
+ new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@@ -5976,7 +5977,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
- flavor=new_instance_type_ref,
+ flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@@ -8610,11 +8611,12 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
- with mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances') as mock_sbi:
- (ref, resv_id) = self.compute_api.create(
+ with mock.patch.object(
+ self.compute_api.compute_task_api, 'schedule_and_build_instances',
+ ) as mock_sbi:
+ ref, resv_id = self.compute_api.create(
self.context,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
image_href='f5000000-0000-0000-0000-000000000000')
build_call = mock_sbi.call_args_list[0]
@@ -8628,11 +8630,12 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(value, instance.system_metadata[key])
def test_create_saves_flavor(self):
- with mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances') as mock_sbi:
- (ref, resv_id) = self.compute_api.create(
+ with mock.patch.object(
+ self.compute_api.compute_task_api, 'schedule_and_build_instances',
+ ) as mock_sbi:
+ ref, resv_id = self.compute_api.create(
self.context,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
image_href=uuids.image_href_id)
build_call = mock_sbi.call_args_list[0]
@@ -8652,7 +8655,7 @@ class ComputeAPITestCase(BaseTestCase):
) as (mock_sbi, mock_secgroups):
self.compute_api.create(
self.context,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
image_href=uuids.image_href_id,
security_groups=['testgroup'])
@@ -8669,12 +8672,13 @@ class ComputeAPITestCase(BaseTestCase):
'nova.network.security_group_api.validate_name',
side_effect=exception.SecurityGroupNotFound('foo'),
) as mock_secgroups:
- self.assertRaises(exception.SecurityGroupNotFound,
- self.compute_api.create,
- self.context,
- instance_type=self.default_flavor,
- image_href=None,
- security_groups=['invalid_sec_group'])
+ self.assertRaises(
+ exception.SecurityGroupNotFound,
+ self.compute_api.create,
+ self.context,
+ flavor=self.default_flavor,
+ image_href=None,
+ security_groups=['invalid_sec_group'])
self.assertEqual(pre_build_len,
len(db.instance_get_all(self.context)))
@@ -8695,7 +8699,7 @@ class ComputeAPITestCase(BaseTestCase):
) as (mock_sbi, _mock_create_resreqs):
self.compute_api.create(
self.context,
- instance_type=self.default_flavor,
+ flavor=self.default_flavor,
image_href=uuids.image_href_id,
requested_networks=requested_networks)
@@ -8735,14 +8739,14 @@ class ComputeAPITestCase(BaseTestCase):
instance = objects.Instance()
instance.update(base_options)
instance = self.compute_api._populate_instance_for_create(
- self.context,
- instance,
- self.fake_image,
- 1,
- security_groups=objects.SecurityGroupList(),
- instance_type=self.tiny_flavor,
- num_instances=num_instances,
- shutdown_terminate=False)
+ self.context,
+ instance,
+ self.fake_image,
+ 1,
+ security_groups=objects.SecurityGroupList(),
+ flavor=self.tiny_flavor,
+ num_instances=num_instances,
+ shutdown_terminate=False)
self.assertEqual(str(base_options['image_ref']),
instance['system_metadata']['image_base_image_ref'])
self.assertEqual(vm_states.BUILDING, instance['vm_state'])
@@ -8770,14 +8774,14 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.key_manager = key_manager.API()
index = 1
instance = self.compute_api._populate_instance_for_create(
- self.context,
- instance,
- self.fake_image,
- index,
- security_groups=objects.SecurityGroupList(),
- instance_type=self.tiny_flavor,
- num_instances=num_instances,
- shutdown_terminate=False)
+ self.context,
+ instance,
+ self.fake_image,
+ index,
+ security_groups=objects.SecurityGroupList(),
+ flavor=self.tiny_flavor,
+ num_instances=num_instances,
+ shutdown_terminate=False)
self.assertIsNotNone(instance.ephemeral_key_uuid)
def test_default_hostname_generator(self):
@@ -8959,14 +8963,17 @@ class ComputeAPITestCase(BaseTestCase):
sys_meta = {k: v for k, v in instance.system_metadata.items()
if not k.startswith('instance_type')}
self.assertEqual(
- {'image_kernel_id': uuids.kernel_id,
- 'image_min_disk': '1',
- 'image_ramdisk_id': uuids.ramdisk_id,
- 'image_something_else': 'meow',
- 'preserved': 'preserve this!',
- 'image_base_image_ref': image_ref,
- 'boot_roles': ''},
- sys_meta)
+ {
+ 'image_kernel_id': uuids.kernel_id,
+ 'image_min_disk': '1',
+ 'image_ramdisk_id': uuids.ramdisk_id,
+ 'image_something_else': 'meow',
+ 'preserved': 'preserve this!',
+ 'image_base_image_ref': image_ref,
+ 'boot_roles': ''
+ },
+ sys_meta
+ )
def test_rebuild(self):
self._test_rebuild(vm_state=vm_states.ACTIVE)
@@ -10531,7 +10538,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = objects.Instance(
id=42,
uuid=uuids.interface_failed_instance,
- image_ref='foo',
+ image_ref=uuids.image_ref,
system_metadata={},
flavor=new_type,
host='fake-host')
@@ -10595,7 +10602,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = objects.Instance(
id=42,
uuid=uuids.interface_failed_instance,
- image_ref='foo',
+ image_ref=uuids.image_ref,
system_metadata={},
flavor=new_type,
host='fake-host',
@@ -13078,13 +13085,13 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
self.compute_api = compute.API()
self.inst_type = objects.Flavor.get_by_name(self.context, 'm1.small')
- def test_can_build_instance_from_visible_instance_type(self):
+ def test_can_build_instance_from_visible_flavor(self):
self.inst_type['disabled'] = False
# Assert that exception.FlavorNotFound is not raised
self.compute_api.create(self.context, self.inst_type,
image_href=uuids.image_instance)
- def test_cannot_build_instance_from_disabled_instance_type(self):
+ def test_cannot_build_instance_from_disabled_flavor(self):
self.inst_type['disabled'] = True
self.assertRaises(exception.FlavorNotFound,
self.compute_api.create, self.context, self.inst_type, None)
@@ -13093,7 +13100,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
new=mock.Mock(return_value=obj_fields.HostStatus.UP))
@mock.patch('nova.compute.api.API._validate_flavor_image_nostatus')
@mock.patch('nova.objects.RequestSpec')
- def test_can_resize_to_visible_instance_type(self, mock_reqspec,
+ def test_can_resize_to_visible_flavor(self, mock_reqspec,
mock_validate):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id =\
@@ -13101,11 +13108,11 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
- instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+ flavor = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
- instance_type['disabled'] = False
- return instance_type
+ flavor['disabled'] = False
+ return flavor
self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
@@ -13115,18 +13122,18 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
@mock.patch('nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=obj_fields.HostStatus.UP))
- def test_cannot_resize_to_disabled_instance_type(self):
+ def test_cannot_resize_to_disabled_flavor(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id = \
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
- instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+ flavor = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
- instance_type['disabled'] = True
- return instance_type
+ flavor['disabled'] = True
+ return flavor
self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
@@ -13147,7 +13154,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
self.instance = self._create_fake_instance_obj()
self.instance_uuid = self.instance['uuid']
- self.instance_type = objects.Flavor.get_by_name(
+ self.flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.tiny')
self.request_spec = objects.RequestSpec()
@@ -13163,14 +13170,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
- flavor=self.instance_type,
+ flavor=self.flavor,
request_spec=self.request_spec,
filter_properties={}, migration=mock.Mock(),
node=None,
clean_shutdown=True, host_list=None)
mock_res.assert_called_once_with(mock.ANY, inst_obj, mock.ANY,
- self.instance_type,
+ self.flavor,
self.request_spec, {}, None)
def test_reschedule_resize_or_reraise_no_filter_properties(self):
@@ -13189,7 +13196,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
# because we're not retrying, we should re-raise the exception
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- self.instance, exc_info, self.instance_type,
+ self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
def test_reschedule_resize_or_reraise_no_retry_info(self):
@@ -13208,7 +13215,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
# because we're not retrying, we should re-raise the exception
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- self.instance, exc_info, self.instance_type,
+ self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
@mock.patch.object(compute_manager.ComputeManager, '_instance_update')
@@ -13230,14 +13237,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
- self.instance, exc_info, self.instance_type,
+ self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
mock_update.assert_called_once_with(
self.context, mock.ANY, task_state=task_states.RESIZE_PREP)
mock_resize.assert_called_once_with(
self.context, mock.ANY,
- {'filter_properties': filter_properties}, self.instance_type,
+ {'filter_properties': filter_properties}, self.flavor,
request_spec=self.request_spec, host_list=None)
mock_notify.assert_called_once_with(
self.context, self.instance, 'fake-mini', action='resize',
@@ -13260,14 +13267,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
self.compute._reschedule_resize_or_reraise(
- self.context, self.instance, exc_info, self.instance_type,
+ self.context, self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
mock_update.assert_called_once_with(
self.context, mock.ANY, task_state=task_states.RESIZE_PREP)
mock_resize.assert_called_once_with(
self.context, mock.ANY,
- {'filter_properties': filter_properties}, self.instance_type,
+ {'filter_properties': filter_properties}, self.flavor,
request_spec=self.request_spec, host_list=None)
mock_notify.assert_called_once_with(
self.context, self.instance, 'fake-mini', action='resize',
@@ -13813,91 +13820,91 @@ class CheckRequestedImageTestCase(test.TestCase):
self.context = context.RequestContext(
'fake_user_id', 'fake_project_id')
- self.instance_type = objects.Flavor.get_by_name(self.context,
+ self.flavor = objects.Flavor.get_by_name(self.context,
'm1.small')
- self.instance_type['memory_mb'] = 64
- self.instance_type['root_gb'] = 1
+ self.flavor['memory_mb'] = 64
+ self.flavor['root_gb'] = 1
def test_no_image_specified(self):
self.compute_api._validate_flavor_image(self.context, None, None,
- self.instance_type, None)
+ self.flavor, None)
def test_image_status_must_be_active(self):
image = dict(id=uuids.image_id, status='foo')
self.assertRaises(exception.ImageNotActive,
self.compute_api._validate_flavor_image, self.context,
- image['id'], image, self.instance_type, None)
+ image['id'], image, self.flavor, None)
image['status'] = 'active'
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_image_min_ram_check(self):
image = dict(id=uuids.image_id, status='active', min_ram='65')
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api._validate_flavor_image, self.context,
- image['id'], image, self.instance_type, None)
+ image['id'], image, self.flavor, None)
image['min_ram'] = '64'
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_image_min_disk_check(self):
image = dict(id=uuids.image_id, status='active', min_disk='2')
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api._validate_flavor_image, self.context,
- image['id'], image, self.instance_type, None)
+ image['id'], image, self.flavor, None)
image['min_disk'] = '1'
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_image_too_large(self):
image = dict(id=uuids.image_id, status='active', size='1073741825')
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api._validate_flavor_image, self.context,
- image['id'], image, self.instance_type, None)
+ image['id'], image, self.flavor, None)
image['size'] = '1073741824'
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_root_gb_zero_disables_size_check(self):
self.policy.set_rules({
servers_policy.ZERO_DISK_FLAVOR: base_policy.RULE_ADMIN_OR_OWNER
}, overwrite=False)
- self.instance_type['root_gb'] = 0
+ self.flavor['root_gb'] = 0
image = dict(id=uuids.image_id, status='active', size='1073741825')
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_root_gb_zero_disables_min_disk(self):
self.policy.set_rules({
servers_policy.ZERO_DISK_FLAVOR: base_policy.RULE_ADMIN_OR_OWNER
}, overwrite=False)
- self.instance_type['root_gb'] = 0
+ self.flavor['root_gb'] = 0
image = dict(id=uuids.image_id, status='active', min_disk='2')
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
def test_config_drive_option(self):
image = {'id': uuids.image_id, 'status': 'active'}
image['properties'] = {'img_config_drive': 'optional'}
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
image['properties'] = {'img_config_drive': 'mandatory'}
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, None)
+ image, self.flavor, None)
image['properties'] = {'img_config_drive': 'bar'}
self.assertRaises(exception.InvalidImageConfigDrive,
self.compute_api._validate_flavor_image,
- self.context, image['id'], image, self.instance_type,
+ self.context, image['id'], image, self.flavor,
None)
def test_volume_blockdevicemapping(self):
@@ -13907,42 +13914,42 @@ class CheckRequestedImageTestCase(test.TestCase):
# larger than the flavor root disk.
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=self.instance_type.root_gb * units.Gi,
- min_disk=self.instance_type.root_gb + 1)
+ size=self.flavor.root_gb * units.Gi,
+ min_disk=self.flavor.root_gb + 1)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
- volume_id=volume_uuid, volume_size=self.instance_type.root_gb + 1)
+ volume_id=volume_uuid, volume_size=self.flavor.root_gb + 1)
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, root_bdm)
+ image, self.flavor, root_bdm)
def test_volume_blockdevicemapping_min_disk(self):
# A bdm object volume smaller than the image's min_disk should not be
# allowed
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=self.instance_type.root_gb * units.Gi,
- min_disk=self.instance_type.root_gb + 1)
+ size=self.flavor.root_gb * units.Gi,
+ min_disk=self.flavor.root_gb + 1)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='volume',
image_id=image_uuid, volume_id=volume_uuid,
- volume_size=self.instance_type.root_gb)
+ volume_size=self.flavor.root_gb)
self.assertRaises(exception.VolumeSmallerThanMinDisk,
self.compute_api._validate_flavor_image,
- self.context, image_uuid, image, self.instance_type,
+ self.context, image_uuid, image, self.flavor,
root_bdm)
def test_volume_blockdevicemapping_min_disk_no_size(self):
# We should allow a root volume whose size is not given
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=self.instance_type.root_gb * units.Gi,
- min_disk=self.instance_type.root_gb)
+ size=self.flavor.root_gb * units.Gi,
+ min_disk=self.flavor.root_gb)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
@@ -13950,27 +13957,27 @@ class CheckRequestedImageTestCase(test.TestCase):
volume_id=volume_uuid, volume_size=None)
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, root_bdm)
+ image, self.flavor, root_bdm)
def test_image_blockdevicemapping(self):
# Test that we can succeed when passing bdms, and the root bdm isn't a
# volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=self.instance_type.root_gb * units.Gi, min_disk=0)
+ size=self.flavor.root_gb * units.Gi, min_disk=0)
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='local', image_id=image_uuid)
self.compute_api._validate_flavor_image(self.context, image['id'],
- image, self.instance_type, root_bdm)
+ image, self.flavor, root_bdm)
def test_image_blockdevicemapping_too_big(self):
# We should do a size check against flavor if we were passed bdms but
# the root bdm isn't a volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=(self.instance_type.root_gb + 1) * units.Gi,
+ size=(self.flavor.root_gb + 1) * units.Gi,
min_disk=0)
root_bdm = block_device_obj.BlockDeviceMapping(
@@ -13979,14 +13986,14 @@ class CheckRequestedImageTestCase(test.TestCase):
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api._validate_flavor_image,
self.context, image['id'],
- image, self.instance_type, root_bdm)
+ image, self.flavor, root_bdm)
def test_image_blockdevicemapping_min_disk(self):
# We should do a min_disk check against flavor if we were passed bdms
# but the root bdm isn't a volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
- size=0, min_disk=self.instance_type.root_gb + 1)
+ size=0, min_disk=self.flavor.root_gb + 1)
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='local', image_id=image_uuid)
@@ -13994,7 +14001,7 @@ class CheckRequestedImageTestCase(test.TestCase):
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api._validate_flavor_image,
self.context, image['id'],
- image, self.instance_type, root_bdm)
+ image, self.flavor, root_bdm)
@mock.patch('nova.virt.hardware.get_dedicated_cpu_constraint')
def test_cpu_policy(self, dedicated_cpu_mock):
@@ -14008,11 +14015,11 @@ class CheckRequestedImageTestCase(test.TestCase):
dedicated_cpu_mock.return_value = None
self.compute_api._validate_flavor_image(
- self.context, image['id'], image, self.instance_type, None)
+ self.context, image['id'], image, self.flavor, None)
image['properties'] = {'hw_cpu_policy': 'bar'}
self.assertRaises(exception.InvalidRequest,
self.compute_api._validate_flavor_image,
- self.context, image['id'], image, self.instance_type,
+ self.context, image['id'], image, self.flavor,
None)
def test_cpu_thread_policy(self):
@@ -14022,11 +14029,11 @@ class CheckRequestedImageTestCase(test.TestCase):
for v in obj_fields.CPUThreadAllocationPolicy.ALL:
image['properties']['hw_cpu_thread_policy'] = v
self.compute_api._validate_flavor_image(
- self.context, image['id'], image, self.instance_type, None)
+ self.context, image['id'], image, self.flavor, None)
image['properties']['hw_cpu_thread_policy'] = 'bar'
self.assertRaises(exception.InvalidRequest,
self.compute_api._validate_flavor_image,
- self.context, image['id'], image, self.instance_type,
+ self.context, image['id'], image, self.flavor,
None)
image['properties'] = {
@@ -14035,5 +14042,5 @@ class CheckRequestedImageTestCase(test.TestCase):
obj_fields.CPUThreadAllocationPolicy.ISOLATE}
self.assertRaises(exception.CPUThreadPolicyConfigurationInvalid,
self.compute_api._validate_flavor_image,
- self.context, image['id'], image, self.instance_type,
+ self.context, image['id'], image, self.flavor,
None)
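
[Editor's note] The hunks above are a mechanical rename of self.instance_type to self.flavor in the _validate_flavor_image tests. For orientation, here is a condensed sketch of the checks those tests exercise — illustrative only, with the exception types stubbed rather than nova's real classes:

    GiB = 1024 ** 3

    class VolumeSmallerThanMinDisk(Exception): pass
    class FlavorDiskSmallerThanImage(Exception): pass
    class FlavorDiskSmallerThanMinDisk(Exception): pass

    def validate_flavor_image(image, flavor, root_bdm):
        """Sketch of the size/min_disk validation the tests above assert."""
        if root_bdm is not None and root_bdm.get('destination_type') == 'volume':
            # Root disk is a volume: check min_disk against the volume size,
            # unless no size was given (explicitly allowed by the tests).
            size = root_bdm.get('volume_size')
            if size is not None and image['min_disk'] > size:
                raise VolumeSmallerThanMinDisk()
        else:
            # Root disk is local: check size and min_disk against the flavor.
            if image['size'] > flavor['root_gb'] * GiB:
                raise FlavorDiskSmallerThanImage()
            if image['min_disk'] > flavor['root_gb']:
                raise FlavorDiskSmallerThanMinDisk()

    image = {'size': 2 * GiB, 'min_disk': 0}
    try:
        validate_flavor_image(image, {'root_gb': 1}, root_bdm=None)
    except FlavorDiskSmallerThanImage:
        pass  # a 2GiB image cannot fit a 1GB root disk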
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 7996044e03..693ed6d62f 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -3248,7 +3248,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_nodename.assert_called_once_with(instance)
mock_drop_move_claim.assert_called_once_with(
self.context, instance, 'fake-node',
- instance_type=instance.flavor)
+ flavor=instance.flavor)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(fake_driver.FakeDriver,
@@ -3388,12 +3388,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
CONF.host, instance.uuid, graceful_exit=False)
return result
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_success(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination()
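
[Editor's note] On the decorators added throughout this file: because a ready-made mock.Mock is passed as the replacement ("new") argument, mock.patch does not inject the mock into the test method, which is why none of the decorated signatures gain an extra parameter. A standalone illustration:

    from unittest import mock

    class InstanceGroupNotFound(Exception):  # stand-in for nova's exception
        pass

    @mock.patch('os.getcwd', mock.Mock(side_effect=InstanceGroupNotFound()))
    def lookup():  # no extra argument: the replacement was supplied explicitly
        import os
        try:
            os.getcwd()
            return 'found'
        except InstanceGroupNotFound:
            return 'not found'

    assert lookup() == 'not found'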
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_fail(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
@@ -3403,7 +3407,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self._test_check_can_live_migrate_destination,
do_raise=True)
- def test_check_can_live_migrate_destination_contins_vifs(self):
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
+ def test_check_can_live_migrate_destination_contains_vifs(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
lambda *args: True))
@@ -3411,6 +3417,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertIn('vifs', migrate_data)
self.assertIsNotNone(migrate_data.vifs)
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_no_binding_extended(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
@@ -3418,18 +3426,40 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
migrate_data = self._test_check_can_live_migrate_destination()
self.assertNotIn('vifs', migrate_data)
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_false(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=False)
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_true(self):
self.useFixture(std_fixtures.MonkeyPatch(
'nova.network.neutron.API.supports_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=True)
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def test_check_can_live_migrate_destination_fail_group_policy(
+ self, mock_fail_db):
+
+ instance = fake_instance.fake_instance_obj(
+ self.context, host=self.compute.host, vm_state=vm_states.ACTIVE,
+ node='fake-node')
+
+ ex = exception.RescheduledException(
+ instance_uuid=instance.uuid, reason="policy violated")
+
+ with mock.patch.object(self.compute, '_validate_instance_group_policy',
+ side_effect=ex):
+ self.assertRaises(
+ exception.MigrationPreCheckError,
+ self.compute.check_can_live_migrate_destination,
+ self.context, instance, None, None, None, None)
+
def test_dest_can_numa_live_migrate(self):
positive_dest_check_data = objects.LibvirtLiveMigrateData(
dst_supports_numa_live_migration=True)
@@ -7494,7 +7524,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def test_validate_policy_honors_workaround_disabled(self, mock_get):
instance = objects.Instance(uuid=uuids.instance)
hints = {'group': 'foo'}
- mock_get.return_value = objects.InstanceGroup(policy=None)
+ mock_get.return_value = objects.InstanceGroup(policy=None,
+ uuid=uuids.group)
self.compute._validate_instance_group_policy(self.context,
instance, hints)
mock_get.assert_called_once_with(self.context, 'foo')
@@ -7520,10 +7551,14 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance, hints)
mock_get.assert_called_once_with(self.context, uuids.group_hint)
+ @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.objects.InstanceGroup.get_by_hint')
- def test_validate_instance_group_policy_with_rules(self, mock_get_by_hint,
- mock_get_by_host):
+ @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
+ def test_validate_instance_group_policy_with_rules(
+ self, migration_list, nodes, mock_get_by_hint, mock_get_by_host,
+ mock_get_by_uuid):
# Create 2 instances on the same host, inst2 created before inst1
instance = objects.Instance(uuid=uuids.inst1)
hints = {'group': [uuids.group_hint]}
@@ -7532,17 +7567,26 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_by_host.return_value = existing_insts
# if the group policy rule limit is 1, raise RescheduledException
- mock_get_by_hint.return_value = objects.InstanceGroup(
+ group = objects.InstanceGroup(
policy='anti-affinity', rules={'max_server_per_host': '1'},
- hosts=['host1'], members=members_uuids)
+ hosts=['host1'], members=members_uuids,
+ uuid=uuids.group)
+ mock_get_by_hint.return_value = group
+ mock_get_by_uuid.return_value = group
+ nodes.return_value = ['nodename']
+ migration_list.return_value = [objects.Migration(
+ uuid=uuids.migration, instance_uuid=uuids.instance)]
self.assertRaises(exception.RescheduledException,
self.compute._validate_instance_group_policy,
self.context, instance, hints)
# if the group policy rule limit changes to 2, validation passes
- mock_get_by_hint.return_value = objects.InstanceGroup(
+ group2 = objects.InstanceGroup(
policy='anti-affinity', rules={'max_server_per_host': 2},
- hosts=['host1'], members=members_uuids)
+ hosts=['host1'], members=members_uuids,
+ uuid=uuids.group)
+ mock_get_by_hint.return_value = group2
+ mock_get_by_uuid.return_value = group2
self.compute._validate_instance_group_policy(self.context,
instance, hints)
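
[Editor's note] The reworked test also stubs get_available_nodes and MigrationList.get_in_progress_by_host_and_node, which suggests the validation now counts in-progress migrations toward the per-host total. A simplified sketch of such a rule check, under that assumption (not nova's actual code):

    class RescheduledException(Exception):  # stand-in for nova's exception
        pass

    def check_max_server_per_host(group, host_instance_uuids, migrations):
        """Simplified anti-affinity rule check (illustrative only)."""
        if group['policy'] != 'anti-affinity':
            return
        limit = int(group['rules'].get('max_server_per_host', 1))
        members = set(group['members'])
        # Count group members already on the host, plus ones migrating in.
        on_host = {u for u in host_instance_uuids if u in members}
        on_host |= {m['instance_uuid'] for m in migrations
                    if m['instance_uuid'] in members}
        if len(on_host) >= limit:
            raise RescheduledException('anti-affinity rule violated')

    group = {'policy': 'anti-affinity',
             'rules': {'max_server_per_host': '1'},
             'members': ['uuid-1', 'uuid-2']}
    try:
        check_max_server_per_host(group, ['uuid-2'], [])
    except RescheduledException:
        pass  # one member already on the host with a limit of 1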
@@ -9070,6 +9114,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
manager.ComputeManager()
mock_executor.assert_called_once_with()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_cinder_v3_api(self):
# This tests that pre_live_migration with a bdm that has an
# attachment_id will create a new attachment and update
@@ -9147,6 +9193,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
_test()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_exception_cinder_v3_api(self):
# The instance in this test has 2 attachments. The second attach_create
# will throw an exception. This will test that the first attachment
@@ -9216,6 +9264,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertGreater(len(m.mock_calls), 0)
_test()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_exceptions_delete_attachments(self):
# The instance in this test has 2 attachments. The call to
# driver.pre_live_migration will raise an exception. This will test
@@ -10594,6 +10644,54 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# (_error_out_instance_on_exception will set to ACTIVE by default).
self.assertEqual(vm_states.STOPPED, instance.vm_state)
+ @mock.patch('nova.compute.utils.notify_usage_exists')
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_notify_about_instance_usage')
+ @mock.patch('nova.compute.utils.notify_about_resize_prep_instance')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.compute.manager.ComputeManager._revert_allocation')
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_reschedule_resize_or_reraise')
+ @mock.patch('nova.compute.utils.add_instance_fault_from_exc')
+ # This is almost a copy-paste of test_prep_resize_fails_rollback.
+ def test_prep_resize_fails_group_validation(
+ self, add_instance_fault_from_exc, _reschedule_resize_or_reraise,
+ _revert_allocation, mock_instance_save,
+ notify_about_resize_prep_instance, _notify_about_instance_usage,
+ notify_usage_exists):
+ """Tests that if _validate_instance_group_policy raises
+ InstanceFaultRollback, the instance.vm_state is reset properly in
+ _error_out_instance_on_exception
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, host=self.compute.host, vm_state=vm_states.STOPPED,
+ node='fake-node', expected_attrs=['system_metadata', 'flavor'])
+ migration = mock.MagicMock(spec='nova.objects.Migration')
+ request_spec = mock.MagicMock(spec='nova.objects.RequestSpec')
+ ex = exception.RescheduledException(
+ instance_uuid=instance.uuid, reason="policy violated")
+ ex2 = exception.InstanceFaultRollback(
+ inner_exception=ex)
+
+ def fake_reschedule_resize_or_reraise(*args, **kwargs):
+ raise ex2
+
+ _reschedule_resize_or_reraise.side_effect = (
+ fake_reschedule_resize_or_reraise)
+
+ with mock.patch.object(self.compute, '_validate_instance_group_policy',
+ side_effect=ex):
+ self.assertRaises(
+ # _error_out_instance_on_exception should reraise the
+ # RescheduledException inside InstanceFaultRollback.
+ exception.RescheduledException, self.compute.prep_resize,
+ self.context, instance.image_meta, instance, instance.flavor,
+ request_spec, filter_properties={}, node=instance.node,
+ clean_shutdown=True, migration=migration, host_list=[])
+ # The instance.vm_state should remain unchanged here
+ # (_error_out_instance_on_exception would otherwise set it to ACTIVE).
+ self.assertEqual(vm_states.STOPPED, instance.vm_state)
+
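
[Editor's note] What this test pins down is the unwrap behaviour: the rollback wrapper must be stripped, the inner exception re-raised, and vm_state left alone. A minimal sketch of an error handler with that shape, assuming _error_out_instance_on_exception works roughly like this (it is not nova's actual code):

    import contextlib

    class InstanceFaultRollback(Exception):
        def __init__(self, inner_exception):
            super().__init__(str(inner_exception))
            self.inner_exception = inner_exception

    @contextlib.contextmanager
    def error_out_instance_on_exception(instance, default_state='active'):
        try:
            yield
        except InstanceFaultRollback as exc:
            # Leave vm_state alone and surface the wrapped error instead.
            raise exc.inner_exception
        except Exception:
            instance['vm_state'] = default_state
            raise

    inst = {'vm_state': 'stopped'}
    try:
        with error_out_instance_on_exception(inst):
            raise InstanceFaultRollback(ValueError('policy violated'))
    except ValueError:
        pass
    assert inst['vm_state'] == 'stopped'  # state untouched, inner error raised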
@mock.patch('nova.compute.rpcapi.ComputeAPI.resize_instance')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index 8ba09bbea7..d0b204a970 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -217,6 +217,12 @@ class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
def test_success_ssh(self):
self._check_success()
+ def test_success_ssh_ed25519(self):
+ self.pub_key = ('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFl6u75BTi8xGtSPm'
+ '1yVJuLE/oMtCOuEMJJnBSuZEdXz')
+ self.fingerprint = '1a:1d:a7:2c:4c:ff:15:c4:70:13:38:b6:ac:4c:dc:12'
+ self._check_success()
+
def test_success_x509(self):
self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509
certif, fingerprint = fake_crypto.get_x509_cert_and_fingerprint()
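
[Editor's note] The fingerprint in the new ed25519 test is consistent with the classic colon-separated MD5 of the base64-decoded key blob. A sketch of how such a fingerprint can be derived, assuming that is the scheme the keypair API uses:

    import base64
    import hashlib

    def md5_fingerprint(pub_key_line):
        # Field 1 of an OpenSSH public key line is the base64-encoded blob.
        blob = base64.b64decode(pub_key_line.split()[1])
        digest = hashlib.md5(blob).hexdigest()
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))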
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index f095a91bae..947e281b98 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -104,7 +104,7 @@ _COMPUTE_NODE_FIXTURES = [
),
]
-_INSTANCE_TYPE_FIXTURES = {
+_FLAVOR_FIXTURES = {
1: {
'id': 1,
'flavorid': 'fakeid-1',
@@ -136,7 +136,7 @@ _INSTANCE_TYPE_FIXTURES = {
}
-_INSTANCE_TYPE_OBJ_FIXTURES = {
+_FLAVOR_OBJ_FIXTURES = {
1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
memory_mb=128, vcpus=1, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=0,
@@ -199,50 +199,50 @@ _INSTANCE_FIXTURES = [
host=_HOSTNAME,
node=_NODENAME,
uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
- memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[1]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[1]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[1]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
- instance_type_id=1,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[1].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[1],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[1],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[1],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
user_id=uuids.user_id,
- flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
- old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
- deleted = False,
- resources = None,
+ deleted=False,
+ resources=None,
),
objects.Instance(
id=2,
host=_HOSTNAME,
node=_NODENAME,
uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
- memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
- instance_type_id=2,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[2],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[2],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.DELETED,
power_state=power_state.SHUTDOWN,
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
user_id=uuids.user_id,
- flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- deleted = False,
- resources = None,
+ deleted=False,
+ resources=None,
),
]
@@ -312,24 +312,24 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
- memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[1]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[1]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[1]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
- instance_type_id=1,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[1].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[1],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[1],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
- flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
- old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
- new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
- resources = None,
+ resources=None,
),
# dest-only
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
@@ -337,23 +337,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
- memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
- instance_type_id=2,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[2],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[1],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
- flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
- old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
- new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# source-and-dest
@@ -362,23 +362,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
- memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
- instance_type_id=2,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[2],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[1],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
- flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
- old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
- new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# dest-only-evac
@@ -387,23 +387,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
- memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
- instance_type_id=2,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[2],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[1],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
- flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
- old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
- new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
}
@@ -2388,7 +2388,7 @@ class TestResize(BaseTestCase):
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance = _INSTANCE_FIXTURES[0].obj_clone()
- instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# This migration context is fine: it points to the first instance
# fixture and indicates a source-and-dest resize.
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[instance.uuid]
@@ -2409,7 +2409,7 @@ class TestResize(BaseTestCase):
status='migrating',
uuid=uuids.migration,
)
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# not using mock.sentinel.ctx because resize_claim calls #elevated
ctx = mock.MagicMock()
@@ -2499,7 +2499,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
old_flavor = instance.flavor
- instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
instance.pci_requests = objects.InstancePCIRequests(requests=[])
# allocations for create
@@ -2559,7 +2559,7 @@ class TestResize(BaseTestCase):
status='migrating',
uuid=uuids.migration,
)
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# Resize instance
with test.nested(
@@ -2674,7 +2674,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.task_state = task_states.RESIZE_MIGRATING
- instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# A destination-only migration
migration = objects.Migration(
@@ -2698,7 +2698,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance.migration_context = mig_context_obj
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ new_flavor = _FLAVOR_OBJ_FIXTURES[2]
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
@@ -2761,7 +2761,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.task_state = task_states.RESIZE_MIGRATING
- instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
instance.migration_context = objects.MigrationContext()
instance.migration_context.new_pci_devices = objects.PciDeviceList(
objects=pci_devs)
@@ -2835,7 +2835,7 @@ class TestResize(BaseTestCase):
instance1.id = 1
instance1.uuid = uuids.instance1
instance1.task_state = task_states.RESIZE_MIGRATING
- instance1.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ instance1.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
migration1 = objects.Migration(
id=1,
@@ -2858,7 +2858,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance1.migration_context = mig_context_obj1
- flavor1 = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+ flavor1 = _FLAVOR_OBJ_FIXTURES[2]
# Instance #2 is resizing to instance type 1 which has 1 vCPU, 128MB
# RAM and 1GB root disk.
@@ -2866,8 +2866,8 @@ class TestResize(BaseTestCase):
instance2.id = 2
instance2.uuid = uuids.instance2
instance2.task_state = task_states.RESIZE_MIGRATING
- instance2.old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
- instance2.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
+ instance2.old_flavor = _FLAVOR_OBJ_FIXTURES[2]
+ instance2.new_flavor = _FLAVOR_OBJ_FIXTURES[1]
migration2 = objects.Migration(
id=2,
@@ -2890,7 +2890,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance2.migration_context = mig_context_obj2
- flavor2 = _INSTANCE_TYPE_OBJ_FIXTURES[1]
+ flavor2 = _FLAVOR_OBJ_FIXTURES[1]
expected = self.rt.compute_nodes[_NODENAME].obj_clone()
expected.vcpus_used = (expected.vcpus_used +
@@ -2990,23 +2990,23 @@ class TestRebuild(BaseTestCase):
host=None,
node=None,
uuid='abef5b54-dea6-47b8-acb2-22aeb1b57919',
- memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
- vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
- root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
- ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
+ memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
+ vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
+ root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
+ ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
- instance_type_id=2,
+ instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
+ flavor=_FLAVOR_OBJ_FIXTURES[2],
+ old_flavor=_FLAVOR_OBJ_FIXTURES[2],
+ new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
os_type='fake-os',
project_id='fake-project',
- flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
- resources = None,
+ resources=None,
)
# not using mock.sentinel.ctx because resize_claim calls #elevated
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 545fac4edd..f062d5f45e 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -40,10 +40,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
- self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
- instance_attr = {'host': 'fake_host',
- 'instance_type_id': self.fake_flavor_obj['id'],
- 'instance_type': self.fake_flavor_obj}
+ instance_attr = {
+ 'host': 'fake_host',
+ 'instance_type_id': self.fake_flavor_obj['id'],
+ 'flavor': self.fake_flavor_obj,
+ }
self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
**instance_attr)
self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
@@ -920,7 +921,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
rpcapi.resize_instance(
ctxt, instance=self.fake_instance_obj,
migration=mock.sentinel.migration, image='image',
- flavor='instance_type', clean_shutdown=True,
+ flavor=self.fake_flavor_obj, clean_shutdown=True,
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
@@ -930,7 +931,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
mock_cctx.cast.assert_called_with(
ctxt, 'resize_instance', instance=self.fake_instance_obj,
migration=mock.sentinel.migration, image='image',
- instance_type='instance_type', clean_shutdown=True)
+ instance_type=self.fake_flavor_obj, clean_shutdown=True)
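
[Editor's note] The pair of assertions captures a version-compat pattern: the 6.0 RPC API accepts flavor=, but when the connection is pinned below 6.0 the cast still carries the legacy instance_type= name. A hedged sketch of that down-level translation (the fake client and the pinned version number are hypothetical):

    class FakeClient:
        """Tiny stand-in for an oslo.messaging RPC client."""
        def __init__(self, max_version):
            self.max_version = max_version
            self.sent = None
        def can_send_version(self, version):
            return self.max_version >= version
        def prepare(self, version):
            self.version = version
            return self
        def cast(self, ctxt, method, **kwargs):
            self.sent = (self.version, method, kwargs)

    def resize_instance(client, ctxt, instance, migration, image, flavor,
                        clean_shutdown=True):
        # Illustrative shim, not nova's actual code.
        kwargs = dict(instance=instance, migration=migration, image=image,
                      clean_shutdown=clean_shutdown)
        if client.can_send_version('6.0'):
            version = '6.0'
            kwargs['flavor'] = flavor
        else:
            version = '5.13'  # hypothetical pinned version
            kwargs['instance_type'] = flavor
        client.prepare(version=version).cast(ctxt, 'resize_instance', **kwargs)

    old = FakeClient('5.13')
    resize_instance(old, None, 'inst', 'mig', 'image', 'flavor')
    assert 'instance_type' in old.sent[2] and 'flavor' not in old.sent[2]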
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 72423610ea..9e29cd171e 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -40,8 +40,8 @@ class MigrationTaskTestCase(test.NoDBTestCase):
self.context.cell_uuid = uuids.cell1
self.flavor = fake_flavor.fake_flavor_obj(self.context)
self.flavor.extra_specs = {'extra_specs': 'fake'}
- inst = fake_instance.fake_db_instance(image_ref='image_ref',
- instance_type=self.flavor)
+ inst = fake_instance.fake_db_instance(
+ image_ref='image_ref', flavor=self.flavor)
inst_object = objects.Instance(
flavor=self.flavor,
numa_topology=None,
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 8d124fe0fa..7366ea7ec1 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -457,21 +457,21 @@ class _BaseTaskTestCase(object):
"""
fake_spec = objects.RequestSpec()
mock_fp.return_value = fake_spec
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
# NOTE(danms): Avoid datetime timezone issues with converted flavors
- instance_type.created_at = None
+ flavor.created_at = None
instances = [objects.Instance(context=self.context,
id=i,
uuid=uuids.fake,
- flavor=instance_type) for i in range(2)]
- instance_type_p = obj_base.obj_to_primitive(instance_type)
+ flavor=flavor) for i in range(2)]
+ flavor_p = obj_base.obj_to_primitive(flavor)
instance_properties = obj_base.obj_to_primitive(instances[0])
instance_properties['system_metadata'] = flavors.save_flavor_info(
- {}, instance_type)
+ {}, flavor)
spec = {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
- 'instance_type': instance_type_p,
+ 'instance_type': flavor_p,
'num_instances': 2}
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
sched_return = copy.deepcopy(fake_host_lists2)
@@ -564,16 +564,16 @@ class _BaseTaskTestCase(object):
"""
fake_spec = objects.RequestSpec()
mock_fp.return_value = fake_spec
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
# NOTE(danms): Avoid datetime timezone issues with converted flavors
- instance_type.created_at = None
+ flavor.created_at = None
instances = [objects.Instance(context=self.context,
id=i,
uuid=uuids.fake,
- flavor=instance_type) for i in range(2)]
+ flavor=flavor) for i in range(2)]
instance_properties = obj_base.obj_to_primitive(instances[0])
instance_properties['system_metadata'] = flavors.save_flavor_info(
- {}, instance_type)
+ {}, flavor)
sched_return = copy.deepcopy(fake_host_lists2)
mock_schedule.return_value = sched_return
diff --git a/nova/tests/unit/db/fakes.py b/nova/tests/unit/db/fakes.py
index 65693c6927..ac3030f9f0 100644
--- a/nova/tests/unit/db/fakes.py
+++ b/nova/tests/unit/db/fakes.py
@@ -28,8 +28,8 @@ def stub_out(test, funcs):
def stub_out_db_instance_api(test, injected=True):
"""Stubs out the db API for creating Instances."""
- def _create_instance_type(**updates):
- instance_type = {'id': 2,
+ def _create_flavor(**updates):
+ flavor = {'id': 2,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
@@ -49,11 +49,11 @@ def stub_out_db_instance_api(test, injected=True):
'description': None
}
if updates:
- instance_type.update(updates)
- return instance_type
+ flavor.update(updates)
+ return flavor
- INSTANCE_TYPES = {
- 'm1.tiny': _create_instance_type(
+ FLAVORS = {
+ 'm1.tiny': _create_flavor(
id=2,
name='m1.tiny',
memory_mb=512,
@@ -64,7 +64,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=1,
rxtx_factor=1.0,
swap=0),
- 'm1.small': _create_instance_type(
+ 'm1.small': _create_flavor(
id=5,
name='m1.small',
memory_mb=2048,
@@ -75,7 +75,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=2,
rxtx_factor=1.0,
swap=1024),
- 'm1.medium': _create_instance_type(
+ 'm1.medium': _create_flavor(
id=1,
name='m1.medium',
memory_mb=4096,
@@ -86,7 +86,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=3,
rxtx_factor=1.0,
swap=0),
- 'm1.large': _create_instance_type(
+ 'm1.large': _create_flavor(
id=3,
name='m1.large',
memory_mb=8192,
@@ -97,7 +97,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=4,
rxtx_factor=1.0,
swap=0),
- 'm1.xlarge': _create_instance_type(
+ 'm1.xlarge': _create_flavor(
id=4,
name='m1.xlarge',
memory_mb=16384,
@@ -110,15 +110,15 @@ def stub_out_db_instance_api(test, injected=True):
swap=0)}
def fake_flavor_get_all(*a, **k):
- return INSTANCE_TYPES.values()
+ return FLAVORS.values()
@classmethod
def fake_flavor_get_by_name(cls, context, name):
- return INSTANCE_TYPES[name]
+ return FLAVORS[name]
@classmethod
def fake_flavor_get(cls, context, id):
- for inst_type in INSTANCE_TYPES.values():
+ for inst_type in FLAVORS.values():
if str(inst_type['id']) == str(id):
return inst_type
return None
diff --git a/nova/tests/unit/db/test_migration_utils.py b/nova/tests/unit/db/test_migration_utils.py
index 5211ea4574..e98b1071a5 100644
--- a/nova/tests/unit/db/test_migration_utils.py
+++ b/nova/tests/unit/db/test_migration_utils.py
@@ -16,11 +16,9 @@
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_fixtures
-from oslo_utils import uuidutils
from sqlalchemy import Integer, String
from sqlalchemy import MetaData, Table, Column
from sqlalchemy.exc import NoSuchTableError
-from sqlalchemy import sql
from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import api as db
@@ -57,39 +55,6 @@ class TestMigrationUtilsSQLite(
self.engine = enginefacade.writer.get_engine()
self.meta = MetaData(bind=self.engine)
- def test_delete_from_select(self):
- table_name = "__test_deletefromselect_table__"
- uuidstrs = []
- for unused in range(10):
- uuidstrs.append(uuidutils.generate_uuid(dashed=False))
-
- conn = self.engine.connect()
- test_table = Table(table_name, self.meta,
- Column('id', Integer, primary_key=True,
- nullable=False, autoincrement=True),
- Column('uuid', String(36), nullable=False))
- test_table.create()
- # Add 10 rows to table
- for uuidstr in uuidstrs:
- ins_stmt = test_table.insert().values(uuid=uuidstr)
- conn.execute(ins_stmt)
-
- # Delete 4 rows in one chunk
- column = test_table.c.id
- query_delete = sql.select([column],
- test_table.c.id < 5).order_by(column)
- delete_statement = db.DeleteFromSelect(test_table,
- query_delete, column)
- result_delete = conn.execute(delete_statement)
- # Verify we delete 4 rows
- self.assertEqual(result_delete.rowcount, 4)
-
- query_all = sql.select([test_table])\
- .where(test_table.c.uuid.in_(uuidstrs))
- rows = conn.execute(query_all).fetchall()
- # Verify we still have 6 rows in table
- self.assertEqual(len(rows), 6)
-
def test_check_shadow_table(self):
table_name = 'test_check_shadow_table'
diff --git a/nova/tests/unit/fake_instance.py b/nova/tests/unit/fake_instance.py
index 20a9889fe2..a3feede37e 100644
--- a/nova/tests/unit/fake_instance.py
+++ b/nova/tests/unit/fake_instance.py
@@ -43,11 +43,11 @@ def fake_db_secgroups(instance, names):
def fake_db_instance(**updates):
- if 'instance_type' in updates:
- if isinstance(updates['instance_type'], objects.Flavor):
- flavor = updates['instance_type']
+ if 'flavor' in updates:
+ if isinstance(updates['flavor'], objects.Flavor):
+ flavor = updates['flavor']
else:
- flavor = objects.Flavor(**updates['instance_type'])
+ flavor = objects.Flavor(**updates['flavor'])
flavorinfo = jsonutils.dumps({
'cur': flavor.obj_to_primitive(),
'old': None,
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 3176cf48f1..a85a19e285 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -251,6 +251,8 @@ class TestNeutronClient(test.NoDBTestCase):
auth_token='token')
cl = neutronapi.get_client(my_context)
self.assertEqual(retries, cl.httpclient.connect_retries)
+ kcl = neutronapi._get_ksa_client(my_context)
+ self.assertEqual(retries, kcl.connect_retries)
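
[Editor's note] The added lines extend the retry check to the ksa client path. For reference, keystoneauth adapters take the retry count as a constructor argument and expose it as an attribute, which is what the assertion reads back — a minimal sketch:

    from keystoneauth1 import adapter, session

    sess = session.Session()
    client = adapter.Adapter(session=sess, connect_retries=3)
    assert client.connect_retries == 3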
class TestAPIBase(test.TestCase):
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index 1129a3a3bf..e9e67ae7b1 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -75,8 +75,10 @@ class _TestInstanceObject(object):
db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
instance_uuid=db_inst['uuid'])
+ db_inst['image_ref'] = uuids.image_ref
db_inst['system_metadata'] = {
+ 'image_id': uuids.image_id,
'image_name': 'os2-warp',
'image_min_ram': 100,
'image_hw_disk_bus': 'ide',
@@ -962,12 +964,12 @@ class _TestInstanceObject(object):
fake_inst = dict(self.fake_instance)
mock_get.return_value = fake_inst
- inst = instance.Instance.get_by_uuid(self.context,
- fake_inst['uuid'],
- expected_attrs=['image_meta'])
+ inst = instance.Instance.get_by_uuid(
+ self.context, fake_inst['uuid'], expected_attrs=['image_meta'])
image_meta = inst.image_meta
self.assertIsInstance(image_meta, objects.ImageMeta)
+ self.assertEqual(uuids.image_ref, image_meta.id)
self.assertEqual(100, image_meta.min_ram)
self.assertEqual('ide', image_meta.properties.hw_disk_bus)
self.assertEqual('ne2k_pci', image_meta.properties.hw_vif_model)
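
[Editor's note] The updated fixture sets both image_ref on the instance and an image_id in system metadata, and the new assertion shows image_ref winning as image_meta.id. A one-liner capturing that precedence (illustrative, not nova's implementation):

    def image_meta_id(db_inst):
        # The instance's image_ref takes precedence over sysmeta's image_id.
        return (db_inst.get('image_ref') or
                db_inst['system_metadata'].get('image_id'))

    inst = {'image_ref': 'ref-uuid', 'system_metadata': {'image_id': 'sm-uuid'}}
    assert image_meta_id(inst) == 'ref-uuid'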
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index cfda29ef86..2e1c69f9bf 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -16,6 +16,7 @@ import collections
import contextlib
import copy
import datetime
+import inspect
import os
import pprint
@@ -1317,12 +1318,12 @@ class TestObjEqualPrims(_BaseTestCase):
class TestObjMethodOverrides(test.NoDBTestCase):
def test_obj_reset_changes(self):
- args = utils.getargspec(base.NovaObject.obj_reset_changes)
+ args = inspect.getfullargspec(base.NovaObject.obj_reset_changes)
obj_classes = base.NovaObjectRegistry.obj_classes()
for obj_name in obj_classes:
obj_class = obj_classes[obj_name][0]
self.assertEqual(args,
- utils.getargspec(obj_class.obj_reset_changes))
+ inspect.getfullargspec(obj_class.obj_reset_changes))
class TestObjectsDefaultingOnInit(test.NoDBTestCase):
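
[Editor's note] Context for the swap above: inspect.getargspec (which utils.getargspec presumably wrapped) was removed in Python 3.11; inspect.getfullargspec is the drop-in replacement and also reports keyword-only arguments. Assuming a signature like the one below for obj_reset_changes:

    import inspect

    def obj_reset_changes(self, fields=None, recursive=False):
        """Hypothetical signature used only for illustration."""

    spec = inspect.getfullargspec(obj_reset_changes)
    assert spec.args == ['self', 'fields', 'recursive']
    assert spec.defaults == (None, False)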
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index f31fa832e7..7edc84bf62 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -674,6 +674,9 @@ class _TestRequestSpecObject(object):
req_obj.retry = expected_retry
nr = objects.NetworkRequest()
req_obj.requested_networks = objects.NetworkRequestList(objects=[nr])
+ req_lvl_params = objects.RequestLevelParams(
+ root_required={"CUSTOM_FOO"})
+ req_obj.request_level_params = req_lvl_params
orig_create_in_db = request_spec.RequestSpec._create_in_db
with mock.patch.object(request_spec.RequestSpec, '_create_in_db') \
@@ -688,13 +691,16 @@ class _TestRequestSpecObject(object):
# 3. requested_resources
# 4. retry
# 5. requested_networks
+ # 6. request_level_params
data = jsonutils.loads(updates['spec'])['nova_object.data']
self.assertNotIn('network_metadata', data)
self.assertIsNone(data['requested_destination'])
self.assertIsNone(data['requested_resources'])
self.assertIsNone(data['retry'])
- self.assertIsNotNone(data['instance_uuid'])
self.assertNotIn('requested_networks', data)
+ self.assertNotIn('request_level_params', data)
+
+ self.assertIsNotNone(data['instance_uuid'])
# also we expect that the following fields are not reset after create
# 1. network_metadata
@@ -702,6 +708,7 @@ class _TestRequestSpecObject(object):
# 3. requested_resources
# 4. retry
# 5. requested_networks
+ # 6. request_level_params
self.assertIsNotNone(req_obj.network_metadata)
self.assertJsonEqual(expected_network_metadata.obj_to_primitive(),
req_obj.network_metadata.obj_to_primitive())
@@ -717,6 +724,10 @@ class _TestRequestSpecObject(object):
self.assertIsNotNone(req_obj.requested_networks)
self.assertJsonEqual(nr.obj_to_primitive(),
req_obj.requested_networks[0].obj_to_primitive())
+ self.assertIsNotNone(req_obj.request_level_params)
+ self.assertJsonEqual(
+ req_lvl_params.obj_to_primitive(),
+ req_obj.request_level_params.obj_to_primitive())
def test_save_does_not_persist_requested_fields(self):
req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
index 72079b2688..2329c2e90a 100644
--- a/nova/tests/unit/scheduler/test_scheduler_utils.py
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -40,21 +40,21 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
def test_build_request_spec_without_image(self):
instance = {'uuid': uuids.instance}
- instance_type = objects.Flavor(**test_flavor.fake_flavor)
+ flavor = objects.Flavor(**test_flavor.fake_flavor)
with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
- mock_extract.return_value = instance_type
+ mock_extract.return_value = flavor
request_spec = scheduler_utils.build_request_spec(None,
[instance])
mock_extract.assert_called_once_with({'uuid': uuids.instance})
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
- instance_type = objects.Flavor()
+ flavor = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
- mock_get.return_value = instance_type
+ mock_get.return_value = flavor
request_spec = scheduler_utils.build_request_spec(None,
[instance])
mock_get.assert_called_once_with()
@@ -134,23 +134,23 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
sched_hints = {'hint': ['over-there']}
forced_host = 'forced-host1'
forced_node = 'forced-node1'
- instance_type = objects.Flavor()
+ flavor = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
- forced_host, forced_node, instance_type)
+ forced_host, forced_node, flavor)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual([forced_host], filt_props['force_hosts'])
self.assertEqual([forced_node], filt_props['force_nodes'])
- self.assertEqual(instance_type, filt_props['instance_type'])
+ self.assertEqual(flavor, filt_props['instance_type'])
def test_build_filter_properties_no_forced_host_no_force_node(self):
sched_hints = {'hint': ['over-there']}
forced_host = None
forced_node = None
- instance_type = objects.Flavor()
+ flavor = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
- forced_host, forced_node, instance_type)
+ forced_host, forced_node, flavor)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
- self.assertEqual(instance_type, filt_props['instance_type'])
+ self.assertEqual(flavor, filt_props['instance_type'])
self.assertNotIn('forced_host', filt_props)
self.assertNotIn('forced_node', filt_props)
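
[Editor's note] Worth noting in these tests: although the local variable was renamed, the returned dict still exposes the flavor under the legacy instance_type key. A simplified sketch of the helper's shape, mirroring only what the assertions describe:

    def build_filter_properties(scheduler_hints, forced_host, forced_node,
                                flavor):
        """Simplified sketch of the helper the tests above exercise."""
        filt_props = {
            'scheduler_hints': scheduler_hints,
            'instance_type': flavor,  # legacy key kept for compatibility
        }
        if forced_host:
            filt_props['force_hosts'] = [forced_host]
        if forced_node:
            filt_props['force_nodes'] = [forced_node]
        return filt_props

    props = build_filter_properties({'hint': ['x']}, None, None, {'id': 5})
    assert props['instance_type'] == {'id': 5}
    assert 'force_hosts' not in props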
diff --git a/nova/tests/unit/test_instance_types_extra_specs.py b/nova/tests/unit/test_flavor_extra_specs.py
index 86cf4e6994..1033f1373c 100644
--- a/nova/tests/unit/test_instance_types_extra_specs.py
+++ b/nova/tests/unit/test_flavor_extra_specs.py
@@ -40,7 +40,6 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
flavor.extra_specs = self.specs
flavor.create()
self.flavor = flavor
- self.instance_type_id = flavor.id
self.flavorid = flavor.flavorid
def tearDown(self):
@@ -48,28 +47,25 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
self.flavor.destroy()
super(InstanceTypeExtraSpecsTestCase, self).tearDown()
- def test_instance_type_specs_get(self):
- flavor = objects.Flavor.get_by_flavor_id(self.context,
- self.flavorid)
+ def test_flavor_extra_specs_get(self):
+ flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
def test_flavor_extra_specs_delete(self):
del self.specs["xpu_model"]
del self.flavor.extra_specs['xpu_model']
self.flavor.save()
- flavor = objects.Flavor.get_by_flavor_id(self.context,
- self.flavorid)
+ flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
- def test_instance_type_extra_specs_update(self):
+ def test_flavor_extra_specs_update(self):
self.specs["cpu_model"] = "Sandy Bridge"
self.flavor.extra_specs["cpu_model"] = "Sandy Bridge"
self.flavor.save()
- flavor = objects.Flavor.get_by_flavor_id(self.context,
- self.flavorid)
+ flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
- def test_instance_type_extra_specs_create(self):
+ def test_flavor_extra_specs_create(self):
net_attrs = {
"net_arch": "ethernet",
"net_mbps": "10000"
@@ -77,15 +73,14 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
self.specs.update(net_attrs)
self.flavor.extra_specs.update(net_attrs)
self.flavor.save()
- flavor = objects.Flavor.get_by_flavor_id(self.context,
- self.flavorid)
+ flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
- def test_instance_type_get_with_extra_specs(self):
+ def test_flavor_get_with_extra_specs(self):
flavor = objects.Flavor.get_by_id(self.context, 5)
self.assertEqual(flavor.extra_specs, {})
- def test_instance_type_get_by_name_with_extra_specs(self):
+ def test_flavor_get_by_name_with_extra_specs(self):
flavor = objects.Flavor.get_by_name(self.context,
"cg1.4xlarge")
self.assertEqual(flavor.extra_specs, self.specs)
@@ -93,13 +88,13 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
"m1.small")
self.assertEqual(flavor.extra_specs, {})
- def test_instance_type_get_by_flavor_id_with_extra_specs(self):
+ def test_flavor_get_by_flavor_id_with_extra_specs(self):
flavor = objects.Flavor.get_by_flavor_id(self.context, 105)
self.assertEqual(flavor.extra_specs, self.specs)
flavor = objects.Flavor.get_by_flavor_id(self.context, 2)
self.assertEqual(flavor.extra_specs, {})
- def test_instance_type_get_all(self):
+ def test_flavor_get_all(self):
flavors = objects.FlavorList.get_all(self.context)
name2specs = {flavor.name: flavor.extra_specs
diff --git a/nova/tests/unit/test_flavors.py b/nova/tests/unit/test_flavors.py
index 1f4bed85a6..6a5e6b2c2d 100644
--- a/nova/tests/unit/test_flavors.py
+++ b/nova/tests/unit/test_flavors.py
@@ -22,7 +22,7 @@ from nova.objects import base as obj_base
from nova import test
-class InstanceTypeTestCase(test.TestCase):
+class FlavorTestCase(test.TestCase):
"""Test cases for flavor code."""
def test_will_not_get_instance_by_unknown_flavor_id(self):
# Ensure get by flavor raises error with wrong flavorid.
@@ -31,39 +31,39 @@ class InstanceTypeTestCase(test.TestCase):
'unknown_flavor')
def test_will_get_instance_by_flavor_id(self):
- default_instance_type = objects.Flavor.get_by_name(
+ default_flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
- flavorid = default_instance_type.flavorid
+ flavorid = default_flavor.flavorid
fetched = flavors.get_flavor_by_flavor_id(flavorid)
self.assertIsInstance(fetched, objects.Flavor)
- self.assertEqual(default_instance_type.flavorid, fetched.flavorid)
+ self.assertEqual(default_flavor.flavorid, fetched.flavorid)
-class InstanceTypeToolsTest(test.TestCase):
+class FlavorToolsTest(test.TestCase):
def setUp(self):
- super(InstanceTypeToolsTest, self).setUp()
+ super().setUp()
self.context = context.get_admin_context()
def _dict_to_metadata(self, data):
return [{'key': key, 'value': value} for key, value in data.items()]
def _test_extract_flavor(self, prefix):
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
- instance_type_p = obj_base.obj_to_primitive(instance_type)
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor_p = obj_base.obj_to_primitive(flavor)
metadata = {}
- flavors.save_flavor_info(metadata, instance_type, prefix)
+ flavors.save_flavor_info(metadata, flavor, prefix)
instance = {'system_metadata': self._dict_to_metadata(metadata)}
- _instance_type = flavors.extract_flavor(instance, prefix)
- _instance_type_p = obj_base.obj_to_primitive(_instance_type)
+ _flavor = flavors.extract_flavor(instance, prefix)
+ _flavor_p = obj_base.obj_to_primitive(_flavor)
props = flavors.system_metadata_flavor_props.keys()
- for key in list(instance_type_p.keys()):
+ for key in list(flavor_p.keys()):
if key not in props:
- del instance_type_p[key]
+ del flavor_p[key]
- self.assertEqual(instance_type_p, _instance_type_p)
+ self.assertEqual(flavor_p, _flavor_p)
def test_extract_flavor(self):
self._test_extract_flavor('')
@@ -79,47 +79,47 @@ class InstanceTypeToolsTest(test.TestCase):
self._test_extract_flavor('foo_')
def test_save_flavor_info(self):
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
example = {}
example_prefix = {}
for key in flavors.system_metadata_flavor_props.keys():
- example['instance_type_%s' % key] = instance_type[key]
- example_prefix['fooinstance_type_%s' % key] = instance_type[key]
+ example['instance_type_%s' % key] = flavor[key]
+ example_prefix['fooinstance_type_%s' % key] = flavor[key]
metadata = {}
- flavors.save_flavor_info(metadata, instance_type)
+ flavors.save_flavor_info(metadata, flavor)
self.assertEqual(example, metadata)
metadata = {}
- flavors.save_flavor_info(metadata, instance_type, 'foo')
+ flavors.save_flavor_info(metadata, flavor, 'foo')
self.assertEqual(example_prefix, metadata)
def test_flavor_numa_extras_are_saved(self):
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
- instance_type['extra_specs'] = {
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
+ flavor['extra_specs'] = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
'foo': 'bar',
}
- sysmeta = flavors.save_flavor_info({}, instance_type)
- _instance_type = flavors.extract_flavor({'system_metadata': sysmeta})
+ sysmeta = flavors.save_flavor_info({}, flavor)
+ _flavor = flavors.extract_flavor({'system_metadata': sysmeta})
expected_extra_specs = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
}
- self.assertEqual(expected_extra_specs, _instance_type['extra_specs'])
+ self.assertEqual(expected_extra_specs, _flavor['extra_specs'])
-class InstanceTypeFilteringTest(test.TestCase):
- """Test cases for the filter option available for instance_type_get_all."""
+class FlavorFilteringTest(test.TestCase):
+ """Test cases for the filter option available for FlavorList.get_all."""
def setUp(self):
- super(InstanceTypeFilteringTest, self).setUp()
+ super().setUp()
self.context = context.get_admin_context()
def assertFilterResults(self, filters, expected):
@@ -153,7 +153,7 @@ class InstanceTypeFilteringTest(test.TestCase):
self.assertFilterResults(filters, expected)
-class CreateInstanceTypeTest(test.TestCase):
+class CreateFlavorTest(test.TestCase):
def assertInvalidInput(self, *create_args, **create_kwargs):
self.assertRaises(exception.InvalidInput, flavors.create,
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
index 78a1792704..6b17fabab4 100644
--- a/nova/tests/unit/test_notifications.py
+++ b/nova/tests/unit/test_notifications.py
@@ -66,11 +66,12 @@ class NotificationsTestCase(test.TestCase):
self.decorated_function_called = False
def _wrapped_create(self, params=None):
- instance_type = objects.Flavor.get_by_name(self.context, 'm1.tiny')
+ flavor = objects.Flavor.get_by_name(self.context, 'm1.tiny')
inst = objects.Instance(image_ref=uuids.image_ref,
user_id=self.user_id,
project_id=self.project_id,
- instance_type_id=instance_type['id'],
+ instance_type_id=flavor.id,
+ flavor=flavor,
root_gb=0,
ephemeral_gb=0,
access_ip_v4='1.2.3.4',
@@ -82,7 +83,6 @@ class NotificationsTestCase(test.TestCase):
inst._context = self.context
if params:
inst.update(params)
- inst.flavor = instance_type
inst.create()
return inst
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index 7555aeab41..8238cb808d 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -64,7 +64,7 @@ class QuotaIntegrationTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
- self.inst_type = objects.Flavor.get_by_name(self.context, 'm1.small')
+ self.flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
self.useFixture(nova_fixtures.GlanceFixture(self))
@@ -106,9 +106,9 @@ class QuotaIntegrationTestCase(test.TestCase):
self._create_instance()
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
try:
- self.compute_api.create(self.context, min_count=1, max_count=1,
- instance_type=self.inst_type,
- image_href=image_uuid)
+ self.compute_api.create(
+ self.context, min_count=1, max_count=1,
+ flavor=self.flavor, image_href=image_uuid)
except exception.QuotaError as e:
expected_kwargs = {'code': 413,
'req': '1, 1',
@@ -123,9 +123,9 @@ class QuotaIntegrationTestCase(test.TestCase):
self._create_instance()
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
try:
- self.compute_api.create(self.context, min_count=1, max_count=1,
- instance_type=self.inst_type,
- image_href=image_uuid)
+ self.compute_api.create(
+ self.context, min_count=1, max_count=1, flavor=self.flavor,
+ image_href=image_uuid)
except exception.QuotaError as e:
expected_kwargs = {'code': 413,
'req': '1',
@@ -149,27 +149,22 @@ class QuotaIntegrationTestCase(test.TestCase):
for i in range(CONF.quota.metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.assertRaises(exception.QuotaError, self.compute_api.create,
- self.context,
- min_count=1,
- max_count=1,
- instance_type=self.inst_type,
- image_href=image_uuid,
- metadata=metadata)
+ self.assertRaises(
+ exception.QuotaError, self.compute_api.create,
+ self.context, min_count=1, max_count=1, flavor=self.flavor,
+ image_href=image_uuid, metadata=metadata)
def _create_with_injected_files(self, files):
api = self.compute_api
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- api.create(self.context, min_count=1, max_count=1,
- instance_type=self.inst_type, image_href=image_uuid,
- injected_files=files)
+ api.create(
+ self.context, min_count=1, max_count=1, flavor=self.flavor,
+ image_href=image_uuid, injected_files=files)
def test_no_injected_files(self):
api = self.compute_api
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- api.create(self.context,
- instance_type=self.inst_type,
- image_href=image_uuid)
+ api.create(self.context, flavor=self.flavor, image_href=image_uuid)
def test_max_injected_files(self):
files = []
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 119666585a..ea5c2982b5 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -2180,20 +2180,15 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_add_instance_info, mock_set_pstate,
mock_looping, mock_wait_active, preserve=False):
- node_id = uuidutils.generate_uuid()
- node = _get_cached_node(id=node_id, instance_id=self.instance_id,
- instance_type_id=5)
+ node_uuid = uuidutils.generate_uuid()
+ node = _get_cached_node(id=node_uuid, instance_id=self.instance_id)
self.mock_conn.get_node.return_value = node
image_meta = ironic_utils.get_test_image_meta()
- flavor_id = 5
- flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
+ flavor = objects.Flavor(flavor_id=5, name='baremetal')
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=self.instance_uuid,
- node=node_id,
- instance_type_id=flavor_id)
- instance.flavor = flavor
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
@@ -2210,9 +2205,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node, instance,
test.MatchType(objects.ImageMeta),
flavor, preserve)
- mock_set_pstate.assert_called_once_with(node_id,
- ironic_states.REBUILD,
- configdrive=mock.ANY)
+ mock_set_pstate.assert_called_once_with(
+ node_uuid, ironic_states.REBUILD, configdrive=mock.ANY)
mock_looping.assert_called_once_with(mock_wait_active, instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
@@ -2256,21 +2250,16 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_configdrive):
node_uuid = uuidutils.generate_uuid()
node = _get_cached_node(
- uuid=node_uuid, instance_uuid=self.instance_uuid,
- instance_type_id=5)
+ uuid=node_uuid, instance_uuid=self.instance_uuid)
mock_get.return_value = node
mock_required_by.return_value = True
mock_configdrive.side_effect = exception.NovaException()
image_meta = ironic_utils.get_test_image_meta()
- flavor_id = 5
- flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
+ flavor = objects.Flavor(flavor_id=5, name='baremetal')
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=self.instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
- instance.flavor = flavor
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.rebuild,
@@ -2291,20 +2280,15 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_required_by, mock_configdrive):
node_uuid = uuidutils.generate_uuid()
node = _get_cached_node(
- uuid=node_uuid, instance_uuid=self.instance_uuid,
- instance_type_id=5)
+ uuid=node_uuid, instance_uuid=self.instance_uuid)
mock_get.return_value = node
mock_required_by.return_value = False
image_meta = ironic_utils.get_test_image_meta()
- flavor_id = 5
- flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
+ flavor = objects.Flavor(flavor_id=5, name='baremetal')
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=self.instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
- instance.flavor = flavor
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
exceptions = [
exception.NovaException(),
@@ -2329,7 +2313,6 @@ class IronicDriverTestCase(test.NoDBTestCase):
host=hostname)
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
- instance_type_id=5,
network_interface='flat')
mock_get.return_value = node
host_id = self.driver.network_binding_host_id(self.ctx, instance)
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index a979368ee9..7707f745e3 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -42,6 +42,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.project_id = 'fake'
self.context = context.get_admin_context()
self.useFixture(nova_fixtures.GlanceFixture(self))
+
+ flavor = objects.Flavor(
+ id=2, name='m1.micro', vcpus=1, memory_mb=128, root_gb=0,
+ ephemeral_gb=0, swap=0, rxtx_factor=1.0, flavorid='1',
+ vcpu_weight=None)
+
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
@@ -53,7 +59,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
- 'instance_type_id': 2, # m1.tiny
+ 'instance_type_id': flavor.id,
+ 'flavor': flavor,
+ 'old_flavor': None,
+ 'new_flavor': None,
'config_drive': None,
'launched_at': None,
'system_metadata': {},
@@ -62,20 +71,6 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'disk_format': 'raw',
}
- flavor = objects.Flavor(memory_mb=128,
- root_gb=0,
- name='m1.micro',
- ephemeral_gb=0,
- vcpus=1,
- swap=0,
- rxtx_factor=1.0,
- flavorid='1',
- vcpu_weight=None,
- id=2)
- self.test_instance['flavor'] = flavor
- self.test_instance['old_flavor'] = None
- self.test_instance['new_flavor'] = None
-
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
@@ -1361,30 +1356,29 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultDeviceNamesTestCase, self).setUp()
self.context = context.get_admin_context()
+ self.flavor = objects.Flavor(id=2, swap=4)
self.instance = objects.Instance(
- uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
- memory_kb='1024000',
- basepath='/some/path',
- bridge_name='br100',
- vcpus=2,
- project_id='fake',
- bridge='br101',
- image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
- root_gb=10,
- ephemeral_gb=20,
- instance_type_id=2,
- config_drive=False,
- root_device_name = '/dev/vda',
- system_metadata={})
+ uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
+ memory_kb='1024000',
+ basepath='/some/path',
+ bridge_name='br100',
+ vcpus=2,
+ project_id='fake',
+ bridge='br101',
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ root_gb=10,
+ ephemeral_gb=20,
+ instance_type_id=self.flavor.id,
+ flavor=self.flavor,
+ config_drive=False,
+ root_device_name='/dev/vda',
+ system_metadata={})
self.image_meta = objects.ImageMeta(
disk_format='raw',
properties=objects.ImageMetaProps())
self.virt_type = 'kvm'
- self.flavor = objects.Flavor(swap=4)
self.patchers = []
- self.patchers.append(mock.patch.object(self.instance, 'get_flavor',
- return_value=self.flavor))
self.patchers.append(mock.patch(
'nova.objects.block_device.BlockDeviceMapping.save'))
for patcher in self.patchers:
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 931154ad6f..97c39b5354 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -685,11 +685,10 @@ def _create_test_instance():
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
- 'instance_type_id': '5', # m1.small
- 'extra_specs': {},
'system_metadata': {
'image_disk_format': 'raw'
},
+ 'instance_type_id': flavor.id,
'flavor': flavor,
'new_flavor': None,
'old_flavor': None,
@@ -2844,7 +2843,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
claim = mock.Mock(autospec=True)
claimed_numa_topology = objects.InstanceNUMATopology()
claim.claimed_numa_topology = claimed_numa_topology
- claim.instance_type = instance.flavor
+ claim.flavor = instance.flavor
numa_info = objects.LibvirtLiveMigrateNUMAInfo()
with test.nested(
mock.patch.object(drvr, '_get_live_migrate_numa_info',
@@ -6692,8 +6691,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
- instance_type = instance_ref.get_flavor()
- instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
@@ -9049,6 +9048,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
uuids.volume_id)
mock_encryptor.detach_volume.assert_not_called()
+ # assert that no attempt to remove the secret is made when
+ # destroy_secrets=False
+ drvr._host.find_secret.reset_mock()
+ drvr._host.delete_secret.reset_mock()
+ drvr._disconnect_volume(
+ self.context,
+ connection_info,
+ instance,
+ encryption=encryption,
+ destroy_secrets=False
+ )
+ drvr._host.delete_secret.assert_not_called()
+
# assert that the encryptor is used if no secret is found
drvr._host.find_secret.reset_mock()
drvr._host.delete_secret.reset_mock()
@@ -10112,6 +10124,36 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_find_secret.assert_called_once_with('volume', uuids.volume_id)
mock_get_encryptor.assert_not_called()
+ @mock.patch('nova.virt.libvirt.host.Host.delete_secret')
+ @mock.patch('nova.virt.libvirt.host.Host.find_secret', new=mock.Mock())
+ def test_detach_encryptor_skip_secret_removal(self, mock_delete_secret):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._detach_encryptor(
+ self.context,
+ {
+ 'data': {
+ 'volume_id': uuids.volume_id
+ }
+ },
+ None,
+ destroy_secrets=False
+ )
+ # Assert that no attempt is made to delete the volume secret
+ mock_delete_secret.assert_not_called()
+
+ drvr._detach_encryptor(
+ self.context,
+ {
+ 'data': {
+ 'volume_id': uuids.volume_id
+ }
+ },
+ None,
+ destroy_secrets=True
+ )
+ # Assert that the volume secret is deleted
+ mock_delete_secret.assert_called_once_with('volume', uuids.volume_id)
+
def test_allow_native_luksv1(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._allow_native_luksv1({}))
@@ -14570,7 +14612,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_spawn_without_image_meta(self):
instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
+ instance_ref['image_ref'] = uuids.image_ref
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -14773,7 +14815,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
nodeDeviceLookupByName=fake_node_device_lookup_by_name)
instance_ref = self.test_instance
- instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['image_ref'] = uuids.image_ref
instance = objects.Instance(**instance_ref)
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
@@ -14809,7 +14851,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance_ref = self.test_instance
- instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['image_ref'] = uuids.image_ref
instance = objects.Instance(**instance_ref)
instance.system_metadata = {}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@@ -14823,7 +14865,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
gotFiles = []
instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
+ instance_ref['image_ref'] = uuids.image_ref
instance = objects.Instance(**instance_ref)
instance['os_type'] = os_type
@@ -14854,12 +14896,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
disk_info, image_meta)
wantFiles = [
- {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ {'filename': imagecache.get_cache_fname(uuids.image_ref),
'size': 10 * units.Gi},
{'filename': filename,
'size': 20 * units.Gi},
]
- self.assertEqual(gotFiles, wantFiles)
+ self.assertEqual(wantFiles, gotFiles)
def test_create_image_plain_os_type_blank(self):
self._test_create_image_plain(os_type='',
@@ -15149,7 +15191,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info = {
'ephemerals': ephemerals}
instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
+ instance_ref['image_ref'] = uuids.image_ref
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
@@ -15752,7 +15794,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_domain_destroy.assert_called_once_with()
mock_teardown_container.assert_called_once_with(instance)
mock_cleanup.assert_called_once_with(self.context, instance,
- network_info, None, False)
+ network_info, None, False,
+ destroy_secrets=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@@ -15772,7 +15815,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.call(instance)])
mock_teardown_container.assert_called_once_with(instance)
mock_cleanup.assert_called_once_with(self.context, instance,
- network_info, None, False)
+ network_info, None, False,
+ destroy_secrets=True)
@mock.patch.object(host.Host, 'get_guest')
def test_reboot_different_ids(self, mock_get):
@@ -15993,7 +16037,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_mdev.assert_called_once_with(instance)
mock_destroy.assert_called_once_with(self.context, instance,
network_info, destroy_disks=False,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ destroy_secrets=False)
mock_get_guest_xml.assert_called_once_with(self.context, instance,
network_info, mock.ANY, mock.ANY,
@@ -16158,7 +16203,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
- return_value=None)
+ return_value={})
def test_attach_direct_passthrough_ports(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
@@ -16177,7 +16222,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
- return_value=None)
+ return_value={})
def test_attach_direct_physical_passthrough_ports(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
@@ -16196,7 +16241,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
- return_value=None)
+ return_value={})
def test_attach_direct_passthrough_ports_with_info_cache(self,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
@@ -18851,7 +18896,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(id=1, uuid=uuids.instance,
- image_ref='my_fake_image')
+ image_ref=uuids.image_ref)
with test.nested(
mock.patch.object(drvr, '_create_domain_setup_lxc'),
@@ -19278,6 +19323,59 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(instance.cleaned)
save.assert_called_once_with()
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vpmems',
+ new=mock.Mock(return_value=None))
+ def test_cleanup_destroy_secrets(self, mock_disconnect_volume):
+ block_device_info = {
+ 'block_device_mapping': [
+ {
+ 'connection_info': mock.sentinel.connection_info
+ }
+ ]
+ }
+ instance = objects.Instance(self.context, **self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ # Pass destroy_vifs=False and destroy_disks=False as we only care about
+ # asserting the behaviour of destroy_secrets in this test.
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ destroy_secrets=False
+ )
+ drvr.cleanup(
+ self.context,
+ instance,
+ network_info={},
+ block_device_info=block_device_info,
+ destroy_vifs=False,
+ destroy_disks=False,
+ )
+
+ # Assert that _disconnect_volume is called first with
+ # destroy_secrets=False and then with the default destroy_secrets=True
+ mock_disconnect_volume.assert_has_calls([
+ mock.call(
+ self.context,
+ mock.sentinel.connection_info,
+ instance,
+ destroy_secrets=False
+ ),
+ mock.call(
+ self.context,
+ mock.sentinel.connection_info,
+ instance,
+ destroy_secrets=True
+ )
+ ])
+
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allow_native_luksv1')
def test_swap_volume_native_luks_blocked(self, mock_allow_native_luksv1,
@@ -21149,7 +21247,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
- inst['instance_type_id'] = 2
+ inst['instance_type_id'] = flavor.id
+ inst['flavor'] = flavor
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = flavor.root_gb
@@ -21166,9 +21265,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
inst.update(params)
instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['metadata', 'system_metadata',
- 'pci_devices'],
- flavor=flavor, **inst)
+ self.context,
+ expected_attrs=['metadata', 'system_metadata', 'pci_devices'],
+ **inst)
# Attributes which we need to be set so they don't touch the db,
# but it's not worth the effort to fake properly
@@ -22588,8 +22687,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# file, instance is back to nominal state: after unshelve,
# instance.image_ref will match current backing file.
self._test_qcow2_rebase_image_during_create(
- image_ref='snapshot_id_of_shelved_instance',
- base_image_ref='original_image_id',
+ image_ref=uuids.shelved_instance_snapshot_id,
+ base_image_ref=uuids.original_image_id,
vm_state=vm_states.SHELVED_OFFLOADED,
rebase_expected=True)
@@ -22598,23 +22697,23 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# will fail (HTTP 404). In that case qemu-img rebase will merge
# backing file into disk, removing backing file dependency.
self._test_qcow2_rebase_image_during_create(
- image_ref='snapshot_id_of_shelved_instance',
- base_image_ref='original_image_id',
+ image_ref=uuids.shelved_instance_snapshot_id,
+ base_image_ref=uuids.original_image_id,
vm_state=vm_states.SHELVED_OFFLOADED,
original_image_in_glance=False,
rebase_expected=True)
def test_cross_cell_resize_qcow2_rebase_image_during_create(self):
self._test_qcow2_rebase_image_during_create(
- image_ref='snapshot_id_of_resized_instance',
- base_image_ref='original_image_id',
+ image_ref=uuids.resized_instance_snapshot_id,
+ base_image_ref=uuids.original_image_id,
task_state=task_states.RESIZE_FINISH,
rebase_expected=True)
def test_cross_cell_resize_qcow2_rebase_image_during_create_notfound(self):
self._test_qcow2_rebase_image_during_create(
- image_ref='snapshot_id_of_resized_instance',
- base_image_ref='original_image_id',
+ image_ref=uuids.resized_instance_snapshot_id,
+ base_image_ref=uuids.original_image_id,
task_state=task_states.RESIZE_FINISH,
original_image_in_glance=False,
rebase_expected=True)
@@ -22624,8 +22723,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# consequently, instance.image_ref remain the same and we must ensure
# that no rebase is done.
self._test_qcow2_rebase_image_during_create(
- image_ref='original_image_id',
- base_image_ref='original_image_id',
+ image_ref=uuids.original_image_id,
+ base_image_ref=uuids.original_image_id,
task_state=task_states.RESIZE_FINISH,
rebase_expected=False)
@@ -26318,6 +26417,7 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
self.inst = {}
self.inst['uuid'] = uuids.fake
self.inst['id'] = '1'
+ self.inst['image_ref'] = ''
# system_metadata is needed for objects.Instance.image_meta conversion
self.inst['system_metadata'] = {}
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index 2acd7e0aff..e9f2707f73 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -15,6 +15,7 @@
import base64
import errno
+import inspect
import os
import shutil
import tempfile
@@ -38,7 +39,6 @@ from nova import objects
from nova.storage import rbd_utils
from nova import test
from nova.tests.unit import fake_processutils
-from nova import utils
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
@@ -1488,8 +1488,10 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
- self.assertEqual(utils.getargspec(imagebackend.Image.libvirt_info),
- utils.getargspec(self.image_class.libvirt_info))
+ self.assertEqual(
+ inspect.getfullargspec(imagebackend.Image.libvirt_info),
+ inspect.getfullargspec(self.image_class.libvirt_info)
+ )
def test_image_path(self):
diff --git a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
index 6ea68e64cc..42043ac495 100644
--- a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
@@ -36,7 +36,8 @@ class TestMachineTypeUtils(test.NoDBTestCase):
uuid=uuidsentinel.instance, host='fake', node='fake',
task_state=None, flavor=objects.Flavor(),
project_id='fake-project', user_id='fake-user',
- vm_state=vm_state, system_metadata={}
+ vm_state=vm_state, system_metadata={},
+ image_ref=uuidsentinel.image_ref
)
if mtype:
instance.system_metadata = {
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index 72f714dd53..4387951f75 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -954,12 +954,12 @@ class MigrationMonitorTestCase(test.NoDBTestCase):
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
- 'instance_type_id': '5', # m1.small
+ 'instance_type_id': flavor.id,
+ 'flavor': flavor,
'extra_specs': {},
'system_metadata': {
'image_disk_format': 'raw',
},
- 'flavor': flavor,
'new_flavor': None,
'old_flavor': None,
'pci_devices': objects.PciDeviceList(),
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 1b4928dca5..579a0ed1c6 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -1054,6 +1054,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ image_ref=uuids.image_ref,
project_id=723, system_metadata={}
)
d1.plug(ins, self.vif_tap)
@@ -1065,6 +1066,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d2 = vif.LibvirtGenericVIFDriver()
mq_ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ image_ref=uuids.image_ref,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
}
@@ -1086,6 +1088,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ image_ref=uuids.image_ref,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
}
@@ -1104,6 +1107,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ image_ref=uuids.image_ref,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True',
'image_hw_vif_model': 'e1000',
diff --git a/nova/tests/unit/virt/test_configdrive.py b/nova/tests/unit/virt/test_configdrive.py
index b5946671f1..6481879d15 100644
--- a/nova/tests/unit/virt/test_configdrive.py
+++ b/nova/tests/unit/virt/test_configdrive.py
@@ -23,6 +23,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
instance = objects.Instance(
config_drive="yes",
+ image_ref='',
system_metadata={
"image_img_config_drive": "mandatory",
}
@@ -34,6 +35,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
self.flags(force_config_drive=False)
instance = objects.Instance(
+ image_ref='',
config_drive=None,
system_metadata={
"image_img_config_drive": "mandatory",
@@ -46,6 +48,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
self.flags(force_config_drive=True)
instance = objects.Instance(
+ image_ref='',
config_drive=None,
launched_at=None,
system_metadata={
@@ -59,6 +62,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
self.flags(force_config_drive=True)
instance = objects.Instance(
+ image_ref='',
config_drive=None,
launched_at='2019-05-17T00:00:00.000000',
system_metadata={
@@ -72,6 +76,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
self.flags(force_config_drive=False)
instance = objects.Instance(
+ image_ref='',
config_drive=None,
system_metadata={
"image_img_config_drive": "optional",
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 82aace61d6..f5d88f5801 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -5421,7 +5421,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_no_uefi(
None, image_prop,
- "hw_mem_encryption property of image %s" % self.image_name)
+ "hw_mem_encryption property of image %s" % self.image_id)
def test_flavor_image_require_encrypted_memory_support_no_uefi(self):
for extra_spec in ('1', 'true', 'True'):
@@ -5430,7 +5430,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
extra_spec, image_prop,
"hw:mem_encryption extra spec in %s flavor and "
"hw_mem_encryption property of image %s"
- % (self.flavor_name, self.image_name))
+ % (self.flavor_name, self.image_id))
class MemEncryptionRequestedWithInvalidMachineTypeTestCase(
@@ -5508,7 +5508,7 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
self._test_encrypted_memory_support_required(
{},
{'hw_mem_encryption': image_prop},
- "hw_mem_encryption property of image %s" % self.image_name
+ "hw_mem_encryption property of image %s" % self.image_id
)
def test_require_encrypted_memory_support_both_required(self):
@@ -5519,7 +5519,7 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
{'hw_mem_encryption': image_prop},
"hw:mem_encryption extra spec in %s flavor and "
"hw_mem_encryption property of image %s" %
- (self.flavor_name, self.image_name)
+ (self.flavor_name, self.image_id)
)
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index a73b7194c4..4086a7d7cc 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -343,10 +343,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.conn = driver.VMwareAPISession()
self.assertEqual(2, self.attempts)
- def _get_instance_type_by_name(self, type):
- for instance_type in DEFAULT_FLAVOR_OBJS:
- if instance_type.name == type:
- return instance_type
+ def _get_flavor_by_name(self, type):
+ for flavor in DEFAULT_FLAVOR_OBJS:
+ if flavor.name == type:
+ return flavor
if type == 'm1.micro':
return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
'name': 'm1.micro', 'deleted': 0, 'created_at': None,
@@ -356,15 +356,15 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'flavorid': '1', 'vcpu_weight': None, 'id': 2}
def _create_instance(self, node=None, set_image_ref=True,
- uuid=None, instance_type='m1.large',
- ephemeral=None, instance_type_updates=None):
+ uuid=None, flavor='m1.large',
+ ephemeral=None, flavor_updates=None):
if not node:
node = self.node_name
if not uuid:
uuid = uuidutils.generate_uuid()
- self.type_data = dict(self._get_instance_type_by_name(instance_type))
- if instance_type_updates:
- self.type_data.update(instance_type_updates)
+ self.type_data = dict(self._get_flavor_by_name(flavor))
+ if flavor_updates:
+ self.type_data.update(flavor_updates)
if ephemeral is not None:
self.type_data['ephemeral_gb'] = ephemeral
values = {'name': 'fake_name',
@@ -393,15 +393,15 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.context, **values)
def _create_vm(self, node=None, num_instances=1, uuid=None,
- instance_type='m1.large', powered_on=True,
- ephemeral=None, bdi=None, instance_type_updates=None):
+ flavor='m1.large', powered_on=True,
+ ephemeral=None, bdi=None, flavor_updates=None):
"""Create and spawn the VM."""
if not node:
node = self.node_name
self._create_instance(node=node, uuid=uuid,
- instance_type=instance_type,
+ flavor=flavor,
ephemeral=ephemeral,
- instance_type_updates=instance_type_updates)
+ flavor_updates=flavor_updates)
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None, allocations={},
@@ -550,9 +550,9 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
vmwareapi_fake.assertPathExists(self, str(path))
vmwareapi_fake.assertPathExists(self, str(root))
- def _iso_disk_type_created(self, instance_type='m1.large'):
+ def _iso_disk_type_created(self, flavor='m1.large'):
self.image.disk_format = 'iso'
- self._create_vm(instance_type=instance_type)
+ self._create_vm(flavor=flavor)
path = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.iso' % self.fake_image_uuid)
@@ -564,7 +564,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
vmwareapi_fake.assertPathExists(self, str(path))
def test_iso_disk_type_created_with_root_gb_0(self):
- self._iso_disk_type_created(instance_type='m1.micro')
+ self._iso_disk_type_created(flavor='m1.micro')
path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
vmwareapi_fake.assertPathNotExists(self, str(path))
@@ -766,7 +766,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_root_size_0(self):
- self._create_vm(instance_type='m1.micro')
+ self._create_vm(flavor='m1.micro')
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
cache = ('[%s] vmware_base/%s/%s.vmdk' %
@@ -1197,7 +1197,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_spawn_hw_versions(self):
updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}}
- self._create_vm(instance_type_updates=updates)
+ self._create_vm(flavor_updates=updates)
vm = self._get_vm_record()
version = vm.get("version")
self.assertEqual('vmx-08', version)
@@ -1273,7 +1273,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self._test_snapshot()
def test_snapshot_no_root_disk(self):
- self._iso_disk_type_created(instance_type='m1.micro')
+ self._iso_disk_type_created(flavor='m1.micro')
self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
@@ -1688,6 +1688,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
hypervisor_os='esxi',
config_drive=True)
instance = objects.Instance(uuid=self.uuid,
+ image_ref='',
config_drive=False,
system_metadata={},
node=self.instance_node)
@@ -2342,8 +2343,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.context, self.instance, vif)
def test_resize_to_smaller_disk(self):
- self._create_vm(instance_type='m1.large')
- flavor = self._get_instance_type_by_name('m1.small')
+ self._create_vm(flavor='m1.large')
+ flavor = self._get_flavor_by_name('m1.small')
self.assertRaises(exception.InstanceFaultRollback,
self.conn.migrate_disk_and_power_off, self.context,
self.instance, 'fake_dest', flavor, None)
diff --git a/nova/utils.py b/nova/utils.py
index 5239dd4ba8..c1b4fccc47 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -80,12 +80,6 @@ _FILE_CACHE = {}
_SERVICE_TYPES = service_types.ServiceTypes()
-if hasattr(inspect, 'getfullargspec'):
- getargspec = inspect.getfullargspec
-else:
- getargspec = inspect.getargspec
-
-
# NOTE(mikal): this seems to have to stay for now to handle os-brick
# requirements. This makes me a sad panda.
def get_root_helper():
@@ -553,7 +547,7 @@ def expects_func_args(*args):
@functools.wraps(dec)
def _decorator(f):
base_f = safe_utils.get_wrapped_function(f)
- argspec = getargspec(base_f)
+ argspec = inspect.getfullargspec(base_f)
if argspec[1] or argspec[2] or set(args) <= set(argspec[0]):
# NOTE (ndipanov): We can't really tell if correct stuff will
# be passed if it's a function with *args or **kwargs so
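
With the Python 2 shim gone, callers use inspect.getfullargspec() directly. A minimal, self-contained sketch of the argument check performed by expects_func_args above, assuming nothing beyond the stdlib (the decorator below is a simplified stand-in, not the nova implementation):

    import inspect

    def expects_args(*names):
        def _decorator(f):
            # FullArgSpec.varargs/varkw replace the old argspec[1]/argspec[2]
            spec = inspect.getfullargspec(f)
            if spec.varargs or spec.varkw or set(names) <= set(spec.args):
                return f
            raise TypeError('%s does not accept %r' % (f.__name__, names))
        return _decorator

    @expects_args('context', 'instance')
    def refresh_cache(context, instance):
        pass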
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 5c96d4041a..eea6ae2440 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -412,7 +412,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
+ destroy_disks=True, destroy_secrets=True):
"""Destroy the specified instance from the Hypervisor.
If the instance is not found (for example if networking failed), this
@@ -425,11 +425,13 @@ class ComputeDriver(object):
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
+ :param destroy_secrets: Indicates if secrets should be destroyed
"""
raise NotImplementedError()
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None, destroy_vifs=True):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True,
+ destroy_secrets=True):
"""Cleanup the instance resources .
Instance should have been destroyed from the Hypervisor before calling
@@ -442,6 +444,8 @@ class ComputeDriver(object):
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
+ :param destroy_vifs: Indicates if vifs should be unplugged
+ :param destroy_secrets: Indicates if secrets should be destroyed
"""
raise NotImplementedError()
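
A minimal sketch of how an out-of-tree driver would adopt the widened interface; only the two signatures come from the hunks above, the bodies are illustrative:

    class MyDriver(driver.ComputeDriver):

        def destroy(self, context, instance, network_info,
                    block_device_info=None, destroy_disks=True,
                    destroy_secrets=True):
            # Tear down the guest, then forward destroy_secrets so that
            # callers may opt out of secret removal.
            self.cleanup(context, instance, network_info,
                         block_device_info=block_device_info,
                         destroy_disks=destroy_disks,
                         destroy_secrets=destroy_secrets)

        def cleanup(self, context, instance, network_info,
                    block_device_info=None, destroy_disks=True,
                    migrate_data=None, destroy_vifs=True,
                    destroy_secrets=True):
            # Drivers that manage no secrets can accept and ignore the
            # flag, as the ironic driver does below.
            pass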
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 0c2476cbfb..008aa94486 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -308,7 +308,7 @@ class FakeDriver(driver.ComputeDriver):
pass
def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
+ destroy_disks=True, destroy_secrets=True):
key = instance.uuid
if key in self.instances:
flavor = instance.flavor
@@ -323,7 +323,8 @@ class FakeDriver(driver.ComputeDriver):
'inst': self.instances}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None, destroy_vifs=True):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True,
+ destroy_secrets=True):
# cleanup() should not be called when the guest has not been destroyed.
if instance.uuid in self.instances:
raise exception.InstanceExists(
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 043ee648fa..1f9513d4d0 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1279,7 +1279,7 @@ def get_mem_encryption_constraint(
cannot be called since it relies on being run from the compute
node in order to retrieve CONF.libvirt.hw_machine_type.
- :param instance_type: Flavor object
+ :param flavor: Flavor object
:param image: an ImageMeta object
:param machine_type: a string representing the machine type (optional)
:raises: nova.exception.FlavorImageConflict
@@ -1313,7 +1313,7 @@ def get_mem_encryption_constraint(
flavor.name)
if image_mem_enc:
requesters.append("hw_mem_encryption property of image %s" %
- image_meta.name)
+ image_meta.id)
_check_mem_encryption_uses_uefi_image(requesters, image_meta)
_check_mem_encryption_machine_type(image_meta, machine_type)
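
For illustration, the requester string now embeds the image UUID rather than its name (the sample id below is made up; presumably the id is preferred because it is always present and unique, while an image name may be neither):

    requesters.append("hw_mem_encryption property of image %s" %
                      image_meta.id)
    # e.g. "hw_mem_encryption property of image <image uuid>"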
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 66f620da69..16cb9db8e9 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -172,12 +172,13 @@ class HyperVDriver(driver.ComputeDriver):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
+ destroy_disks=True, destroy_secrets=True):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None, destroy_vifs=True):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True,
+ destroy_secrets=True):
"""Cleanup after instance being destroyed by Hypervisor."""
self.unplug_vifs(instance, network_info)
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 690724ac20..6c03d03b7b 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -1263,7 +1263,8 @@ class IronicDriver(virt_driver.ComputeDriver):
_sync_remove_cache_entry()
def destroy(self, context, instance, network_info,
- block_device_info=None, destroy_disks=True):
+ block_device_info=None, destroy_disks=True,
+ destroy_secrets=True):
"""Destroy the specified instance, if it can be found.
:param context: The security context.
@@ -1273,6 +1274,8 @@ class IronicDriver(virt_driver.ComputeDriver):
information. Ignored by this driver.
:param destroy_disks: Indicates if disks should be
destroyed. Ignored by this driver.
+ :param destroy_secrets: Indicates if secrets should be
+ destroyed. Ignored by this driver.
"""
LOG.debug('Destroy called for instance', instance=instance)
try:
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 391231c527..65b6297faa 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1405,13 +1405,13 @@ class LibvirtDriver(driver.ComputeDriver):
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
+ destroy_disks=True, destroy_secrets=True):
self._destroy(instance)
# NOTE(gibi): if there was device detach in progress then we need to
# unblock the waiting threads and clean up.
self._device_event_handler.cleanup_waiters(instance.uuid)
self.cleanup(context, instance, network_info, block_device_info,
- destroy_disks)
+ destroy_disks, destroy_secrets=destroy_secrets)
def _undefine_domain(self, instance):
try:
@@ -1438,7 +1438,8 @@ class LibvirtDriver(driver.ComputeDriver):
pass
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None, destroy_vifs=True):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True,
+ destroy_secrets=True):
"""Cleanup the instance from the host.
Identify if the instance disks and instance path should be removed
@@ -1452,6 +1453,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param destroy_disks: if local ephemeral disks should be destroyed
:param migrate_data: optional migrate_data object
:param destroy_vifs: if plugged vifs should be unplugged
+ :param destroy_secrets: Indicates if secrets should be destroyed
"""
cleanup_instance_dir = False
cleanup_instance_disks = False
@@ -1483,11 +1485,12 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info,
destroy_vifs=destroy_vifs,
cleanup_instance_dir=cleanup_instance_dir,
- cleanup_instance_disks=cleanup_instance_disks)
+ cleanup_instance_disks=cleanup_instance_disks,
+ destroy_secrets=destroy_secrets)
def _cleanup(self, context, instance, network_info, block_device_info=None,
destroy_vifs=True, cleanup_instance_dir=False,
- cleanup_instance_disks=False):
+ cleanup_instance_disks=False, destroy_secrets=True):
"""Cleanup the domain and any attached resources from the host.
This method cleans up any pmem devices, unplugs VIFs, disconnects
@@ -1528,7 +1531,9 @@ class LibvirtDriver(driver.ComputeDriver):
continue
try:
- self._disconnect_volume(context, connection_info, instance)
+ self._disconnect_volume(
+ context, connection_info, instance,
+ destroy_secrets=destroy_secrets)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
@@ -1845,8 +1850,13 @@ class LibvirtDriver(driver.ComputeDriver):
return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
- encryption=None):
- self._detach_encryptor(context, connection_info, encryption=encryption)
+ encryption=None, destroy_secrets=True):
+ self._detach_encryptor(
+ context,
+ connection_info,
+ encryption=encryption,
+ destroy_secrets=destroy_secrets
+ )
vol_driver = self._get_volume_driver(connection_info)
volume_id = driver_block_device.get_volume_id(connection_info)
multiattach = connection_info.get('multiattach', False)
@@ -1959,7 +1969,8 @@ class LibvirtDriver(driver.ComputeDriver):
encryption)
encryptor.attach_volume(context, **encryption)
- def _detach_encryptor(self, context, connection_info, encryption):
+ def _detach_encryptor(self, context, connection_info, encryption,
+ destroy_secrets=True):
"""Detach the frontend encryptor if one is required by the volume.
The request context is only used when an encryption metadata dict is
@@ -1971,7 +1982,11 @@ class LibvirtDriver(driver.ComputeDriver):
"""
volume_id = driver_block_device.get_volume_id(connection_info)
if volume_id and self._host.find_secret('volume', volume_id):
+ if not destroy_secrets:
+ LOG.debug("Skipping volume secret destruction")
+ return
return self._host.delete_secret('volume', volume_id)
+
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
@@ -2499,6 +2514,11 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as ex:
code = ex.get_error_code()
msg = ex.get_error_message()
+ LOG.debug(
+ "Libvirt returned error while detaching device %s from "
+ "instance %s. Libvirt error code: %d, error message: %s.",
+ device_name, instance_uuid, code, msg
+ )
if code == libvirt.VIR_ERR_DEVICE_MISSING:
LOG.debug(
'Libvirt failed to detach device %s from instance %s '
@@ -3727,7 +3747,8 @@ class LibvirtDriver(driver.ComputeDriver):
# we can here without losing data. This allows us to re-initialise from
# scratch, and hopefully fix, most aspects of a non-functioning guest.
self.destroy(context, instance, network_info, destroy_disks=False,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ destroy_secrets=False)
# Convert the system metadata to image metadata
# NOTE(mdbooth): This is a workaround for stateless Nova compute
@@ -9016,8 +9037,7 @@ class LibvirtDriver(driver.ComputeDriver):
def post_claim_migrate_data(self, context, instance, migrate_data, claim):
migrate_data.dst_numa_info = self._get_live_migrate_numa_info(
- claim.claimed_numa_topology, claim.instance_type,
- claim.image_meta)
+ claim.claimed_numa_topology, claim.flavor, claim.image_meta)
return migrate_data
def _get_resources(self, instance, prefix=None):
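
Taken together, the libvirt hunks plumb a single flag through destroy() -> cleanup() -> _cleanup() -> _disconnect_volume() -> _detach_encryptor(). A sketch of the one in-tree caller that opts out, the hard-reboot path shown above (names are from the hunks; surrounding code is elided and the rationale in the comment is an inference):

    # During a hard reboot: destroy the guest but keep its disks and any
    # libvirt volume secrets, so encrypted volumes can be reattached to
    # the recreated domain without re-fetching their keys.
    self.destroy(context, instance, network_info, destroy_disks=False,
                 block_device_info=block_device_info,
                 destroy_secrets=False)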
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 6f2451150f..520d710cba 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -225,7 +225,8 @@ class VMwareVCDriver(driver.ComputeDriver):
LOG.debug('Extension %s already exists.', constants.EXTENSION_KEY)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None, destroy_vifs=True):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True,
+ destroy_secrets=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
@@ -594,7 +595,7 @@ class VMwareVCDriver(driver.ComputeDriver):
instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
+ destroy_disks=True, destroy_secrets=True):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker