-rw-r--r--  .zuul.yaml | 20
-rw-r--r--  doc/source/admin/secure-live-migration-with-qemu-native-tls.rst | 11
-rw-r--r--  doc/source/user/flavors.rst | 6
-rwxr-xr-x  gate/live_migration/hooks/ceph.sh | 1
-rwxr-xr-x  gate/live_migration/hooks/run_tests.sh | 5
-rwxr-xr-x  gate/live_migration/hooks/utils.sh | 12
-rw-r--r--  lower-constraints.txt | 173
-rw-r--r--  nova/api/metadata/base.py | 12
-rw-r--r--  nova/compute/api.py | 10
-rw-r--r--  nova/compute/manager.py | 176
-rw-r--r--  nova/conf/compute.py | 8
-rw-r--r--  nova/conf/workarounds.py | 24
-rw-r--r--  nova/console/websocketproxy.py | 17
-rw-r--r--  nova/db/sqlalchemy/api.py | 23
-rw-r--r--  nova/exception.py | 7
-rw-r--r--  nova/image/glance.py | 2
-rw-r--r--  nova/network/model.py | 23
-rw-r--r--  nova/network/neutronv2/api.py | 8
-rw-r--r--  nova/network/neutronv2/constants.py | 1
-rw-r--r--  nova/objects/instance_mapping.py | 13
-rw-r--r--  nova/tests/functional/db/test_archive.py | 15
-rw-r--r--  nova/tests/functional/regressions/test_bug_1888395.py | 163
-rw-r--r--  nova/tests/functional/test_monkey_patch.py | 45
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_shelve.py | 1
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 11
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 175
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py | 65
-rw-r--r--  nova/tests/unit/db/test_db_api.py | 8
-rw-r--r--  nova/tests/unit/image/test_glance.py | 5
-rw-r--r--  nova/tests/unit/objects/test_instance_mapping.py | 9
-rw-r--r--  nova/tests/unit/test_hacking.py | 2
-rw-r--r--  nova/tests/unit/test_metadata.py | 16
-rw-r--r--  nova/tests/unit/virt/libvirt/fake_imagebackend.py | 8
-rw-r--r--  nova/tests/unit/virt/libvirt/fakelibvirt.py | 23
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 213
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py | 3
-rw-r--r--  nova/tests/unit/virt/libvirt/test_migration.py | 43
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py | 5
-rw-r--r--  nova/virt/libvirt/driver.py | 112
-rw-r--r--  nova/virt/libvirt/guest.py | 4
-rw-r--r--  nova/virt/libvirt/host.py | 8
-rw-r--r--  nova/virt/libvirt/migration.py | 11
-rw-r--r--  releasenotes/notes/avoid_muli_ceph_download-4083decf501dba40.yaml | 19
-rw-r--r--  releasenotes/notes/bug-1821755-7bd03319e34b6b10.yaml | 11
-rw-r--r--  releasenotes/notes/bug-1841932-c871ac7b3b05d67e.yaml | 9
-rw-r--r--  releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml | 19
-rw-r--r--  releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml | 14
-rw-r--r--  tox.ini | 39
48 files changed, 1291 insertions, 317 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index f80a9eba22..69ec4712f7 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -119,6 +119,17 @@
timeout: 3600
- job:
+ name: nova-tox-validate-backport
+ parent: openstack-tox
+ description: |
+ Determine whether a backport is ready to be merged by checking whether it
+ has already been merged to master or more recent stable branches.
+
+ Uses tox with the ``validate-backport`` environment.
+ vars:
+ tox_envlist: validate-backport
+
+- job:
name: nova-live-migration
parent: nova-dsvm-multinode-base
description: |
@@ -358,10 +369,8 @@
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
- - check-requirements
- integrated-gate-compute
- openstack-cover-jobs
- - openstack-lower-constraints-jobs
- openstack-python-jobs
- openstack-python3-train-jobs
- periodic-stable-jobs
@@ -393,6 +402,8 @@
- nova-next
- nova-tox-functional
- nova-tox-functional-py36
+ - nova-tox-validate-backport:
+ voting: false
- tempest-integrated-compute:
irrelevant-files: *dsvm-irrelevant-files
- tempest-slow-py3:
@@ -401,6 +412,8 @@
irrelevant-files: *dsvm-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *dsvm-irrelevant-files
+ - requirements-check:
+ nodeset: ubuntu-bionic
gate:
jobs:
- nova-grenade-multinode
@@ -409,6 +422,7 @@
- nova-tox-functional-py36
- nova-multi-cell
- nova-next
+ - nova-tox-validate-backport
- tempest-integrated-compute:
irrelevant-files: *dsvm-irrelevant-files
- tempest-slow-py3:
@@ -417,6 +431,8 @@
irrelevant-files: *dsvm-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *dsvm-irrelevant-files
+ - requirements-check:
+ nodeset: ubuntu-bionic
experimental:
jobs:
- ironic-tempest-bfv:
diff --git a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
index 012d78e93b..fb76f656af 100644
--- a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
+++ b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
@@ -120,10 +120,13 @@ Performing the migration
(1) On all relevant compute nodes, enable the
:oslo.config:option:`libvirt.live_migration_with_native_tls`
- configuration attribute::
+ configuration attribute and set the
+ :oslo.config:option:`libvirt.live_migration_scheme`
+ configuration attribute to tls::
[libvirt]
live_migration_with_native_tls = true
+ live_migration_scheme = tls
.. note::
Setting both
@@ -131,6 +134,12 @@ Performing the migration
:oslo.config:option:`libvirt.live_migration_tunnelled` at the
same time is invalid (and disallowed).
+ .. note::
+ Not setting
+ :oslo.config:option:`libvirt.live_migration_scheme` to ``tls``
+ will result in libvirt using the unencrypted TCP connection
+      without displaying any error or warning in the logs.
+
And restart the ``nova-compute`` service::
$ systemctl restart openstack-nova-compute
diff --git a/doc/source/user/flavors.rst b/doc/source/user/flavors.rst
index 3e24fc0072..740a4edee0 100644
--- a/doc/source/user/flavors.rst
+++ b/doc/source/user/flavors.rst
@@ -706,10 +706,14 @@ Hiding hypervisor signature
As of the 18.0.0 Rocky release, this is only supported by the libvirt
driver.
+ Prior to the 21.0.0 Ussuri release, this was called
+    ``hide_hypervisor_id``. An alias is provided for backwards
+    compatibility.
+
.. code:: console
$ openstack flavor set FLAVOR-NAME \
- --property hide_hypervisor_id=VALUE
+ --property hw:hide_hypervisor_id=VALUE
Where:
diff --git a/gate/live_migration/hooks/ceph.sh b/gate/live_migration/hooks/ceph.sh
index 483f92b0e1..c588f7c9b2 100755
--- a/gate/live_migration/hooks/ceph.sh
+++ b/gate/live_migration/hooks/ceph.sh
@@ -8,6 +8,7 @@ function prepare_ceph {
configure_ceph
#install ceph-common package on compute nodes
$ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m raw -a "executable=/bin/bash
+ export CEPH_RELEASE=nautilus
source $BASE/new/devstack/functions
source $BASE/new/devstack/functions-common
git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
diff --git a/gate/live_migration/hooks/run_tests.sh b/gate/live_migration/hooks/run_tests.sh
index 00ad341634..7a5027f2c9 100755
--- a/gate/live_migration/hooks/run_tests.sh
+++ b/gate/live_migration/hooks/run_tests.sh
@@ -55,6 +55,11 @@ fi
echo '4. test with Ceph for root + ephemeral disks'
# Discover and set variables for the OS version so the devstack-plugin-ceph
# scripts can find the correct repository to install the ceph packages.
+# NOTE(lyarwood): Pin the CEPH_RELEASE to nautilus here as was the case
+# prior to https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/777232
+# landing in the branchless plugin. We also have to pin it in ceph.sh when
+# configuring ceph on a remote node via ansible.
+export CEPH_RELEASE=nautilus
GetOSVersion
prepare_ceph
GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
diff --git a/gate/live_migration/hooks/utils.sh b/gate/live_migration/hooks/utils.sh
index 9f98ca2e25..e494ae03f8 100755
--- a/gate/live_migration/hooks/utils.sh
+++ b/gate/live_migration/hooks/utils.sh
@@ -3,7 +3,17 @@
function run_tempest {
local message=$1
local tempest_regex=$2
- sudo -H -u tempest tox -eall -- $tempest_regex --concurrency=$TEMPEST_CONCURRENCY
+
+ # NOTE(gmann): Set upper constraint for Tempest run so that it matches
+ # with what devstack is using and does not recreate the tempest virtual
+ # env.
+ TEMPEST_VENV_UPPER_CONSTRAINTS=$(set +o xtrace &&
+ source $BASE/new/devstack/stackrc &&
+ echo $TEMPEST_VENV_UPPER_CONSTRAINTS)
+ export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+ echo "using $UPPER_CONSTRAINTS_FILE for tempest run"
+
+ sudo -H -u tempest UPPER_CONSTRAINTS_FILE=$UPPER_CONSTRAINTS_FILE tox -eall -- $tempest_regex --concurrency=$TEMPEST_CONCURRENCY
exitcode=$?
if [[ $exitcode -ne 0 ]]; then
die $LINENO "$message failure"
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index 597a4466c1..0000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,173 +0,0 @@
-alembic==0.9.8
-amqp==2.2.2
-appdirs==1.4.3
-asn1crypto==0.24.0
-attrs==17.4.0
-automaton==1.14.0
-Babel==2.3.4
-bandit==1.1.0
-cachetools==2.0.1
-castellan==0.16.0
-cffi==1.11.5
-cliff==2.11.0
-cmd2==0.8.1
-colorama==0.3.9
-contextlib2==0.5.5;python_version=='2.7'
-coverage==4.0
-cryptography==2.7
-cursive==0.2.1
-ddt==1.0.1
-debtcollector==1.19.0
-decorator==3.4.0
-deprecation==2.0
-dogpile.cache==0.6.5
-enum34==1.0.4
-enum-compat==0.0.2
-eventlet==0.20.0
-extras==1.0.0
-fasteners==0.14.1
-fixtures==3.0.0
-flake8==2.6.0
-future==0.16.0
-futurist==1.8.0
-gabbi==1.35.0
-gitdb2==2.0.3
-GitPython==2.1.8
-greenlet==0.4.10
-hacking==1.1.0
-idna==2.6
-iso8601==0.1.11
-Jinja2==2.10
-jmespath==0.9.3
-jsonpatch==1.21
-jsonpath-rw==1.4.0
-jsonpath-rw-ext==1.1.3
-jsonpointer==2.0
-jsonschema==2.6.0
-keystoneauth1==3.16.0
-keystonemiddleware==4.20.0
-kombu==4.1.0
-linecache2==1.0.0
-lxml==3.4.1
-Mako==1.0.7
-MarkupSafe==1.0
-mccabe==0.2.1
-microversion-parse==0.2.1
-mock==3.0.0
-monotonic==1.4
-mox3==0.20.0
-msgpack==0.5.6
-msgpack-python==0.5.6
-munch==2.2.0
-netaddr==0.7.18
-netifaces==0.10.4
-networkx==1.11
-numpy==1.14.2
-openstacksdk==0.35.0
-os-brick==2.6.1
-os-client-config==1.29.0
-os-resource-classes==0.4.0
-os-service-types==1.7.0
-os-traits==0.16.0
-os-vif==1.14.0
-os-win==3.0.0
-os-xenapi==0.3.3
-osc-lib==1.10.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==6.1.0
-oslo.context==2.19.2
-oslo.db==4.44.0
-oslo.i18n==3.15.3
-oslo.log==3.36.0
-oslo.messaging==7.0.0
-oslo.middleware==3.31.0
-oslo.policy==1.35.0
-oslo.privsep==1.33.2
-oslo.reports==1.18.0
-oslo.rootwrap==5.8.0
-oslo.serialization==2.21.1
-oslo.service==1.40.1
-oslo.upgradecheck==0.1.1
-oslo.utils==3.40.2
-oslo.versionedobjects==1.35.0
-oslo.vmware==2.17.0
-oslotest==3.8.0
-osprofiler==1.4.0
-ovs==2.10.0
-ovsdbapp==0.15.0
-packaging==17.1
-paramiko==2.0.0
-Paste==2.0.2
-PasteDeploy==1.5.0
-pbr==2.0.0
-pluggy==0.6.0
-ply==3.11
-prettytable==0.7.1
-psutil==3.2.2
-psycopg2==2.7
-py==1.5.2
-pyasn1==0.4.2
-pyasn1-modules==0.2.1
-pycadf==2.7.0
-pycparser==2.18
-pyflakes==0.8.1
-pycodestyle==2.0.0
-pyinotify==0.9.6
-pyroute2==0.5.4
-PyJWT==1.7.0
-PyMySQL==0.7.6
-pyOpenSSL==17.5.0
-pyparsing==2.2.0
-pyperclip==1.6.0
-pypowervm==1.1.15
-pytest==3.4.2
-python-barbicanclient==4.5.2
-python-cinderclient==3.3.0
-python-dateutil==2.5.3
-python-editor==1.0.3
-python-glanceclient==2.8.0
-python-ironicclient==2.7.0
-python-keystoneclient==3.15.0
-python-mimeparse==1.6.0
-python-neutronclient==6.7.0
-python-subunit==1.2.0
-pytz==2018.3
-PyYAML==3.12
-repoze.lru==0.7
-requests==2.14.2
-requests-mock==1.2.0
-requestsexceptions==1.4.0
-retrying==1.3.3
-rfc3986==1.1.0
-Routes==2.3.1
-simplejson==3.13.2
-six==1.10.0
-smmap2==2.0.3
-sortedcontainers==2.1.0
-SQLAlchemy==1.2.19
-sqlalchemy-migrate==0.11.0
-sqlparse==0.2.4
-statsd==3.2.2
-stestr==2.0.0
-stevedore==1.20.0
-suds-jurko==0.6
-taskflow==2.16.0
-Tempita==0.5.2
-tenacity==4.9.0
-testrepository==0.0.20
-testresources==2.0.0
-testscenarios==0.4
-testtools==2.2.0
-tooz==1.58.0
-traceback2==1.4.0
-unittest2==1.1.0
-urllib3==1.22
-vine==1.1.4
-voluptuous==0.11.1
-warlock==1.2.0
-WebOb==1.8.2
-websockify==0.8.0
-wrapt==1.10.11
-wsgi-intercept==1.7.0
-zVMCloudConnector==1.3.0
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index a53b51c333..3cba366be0 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -122,8 +122,13 @@ class InstanceMetadata(object):
if not content:
content = []
+ # NOTE(gibi): this is not a cell targeted context even if we are called
+ # in a situation when the instance is in a different cell than the
+ # metadata service itself.
ctxt = context.get_admin_context()
+ self.mappings = _format_instance_mapping(instance)
+
# NOTE(danms): Sanitize the instance to limit the amount of stuff
# inside that may not pickle well (i.e. context). We also touch
# some of the things we'll lazy load later to make sure we keep their
@@ -145,8 +150,6 @@ class InstanceMetadata(object):
self.security_groups = secgroup_api.get_instance_security_groups(
ctxt, instance)
- self.mappings = _format_instance_mapping(ctxt, instance)
-
if instance.user_data is not None:
self.userdata_raw = base64.decode_as_bytes(instance.user_data)
else:
@@ -683,9 +686,8 @@ def get_metadata_by_instance_id(instance_id, address, ctxt=None):
return InstanceMetadata(instance, address)
-def _format_instance_mapping(ctxt, instance):
- bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- ctxt, instance.uuid)
+def _format_instance_mapping(instance):
+ bdms = instance.get_bdms()
return block_device.instance_block_mapping(instance, bdms)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4fa0cc2a5a..bc23cbeaa7 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3392,6 +3392,8 @@ class API(base.Base):
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
+ new_sys_metadata.update({'image_base_image_ref': image_id})
+
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
@@ -3805,6 +3807,14 @@ class API(base.Base):
hypervisor.
"""
instance.task_state = task_states.SHELVING
+
+ # NOTE(aarents): Ensure image_base_image_ref is present as it will be
+ # needed during unshelve and instance rebuild done before Bug/1893618
+ # Fix dropped it.
+ instance.system_metadata.update(
+ {'image_base_image_ref': instance.image_ref}
+ )
+
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index eaedc0238f..ce74fec4c6 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1330,6 +1330,13 @@ class ComputeManager(manager.Manager):
eventlet.semaphore.BoundedSemaphore(
CONF.compute.max_concurrent_disk_ops)
+ if CONF.compute.max_disk_devices_to_attach == 0:
+ msg = _('[compute]max_disk_devices_to_attach has been set to 0, '
+ 'which will prevent instances from being able to boot. '
+ 'Set -1 for unlimited or set >= 1 to limit the maximum '
+ 'number of disk devices.')
+ raise exception.InvalidConfiguration(msg)
+
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
@@ -1580,7 +1587,11 @@ class ComputeManager(manager.Manager):
return [_decode(f) for f in injected_files]
def _validate_instance_group_policy(self, context, instance,
- scheduler_hints):
+ scheduler_hints=None):
+
+ if CONF.workarounds.disable_group_policy_check_upcall:
+ return
+
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# the policy. Since more than one instance may be scheduled at the
@@ -1589,29 +1600,63 @@ class ComputeManager(manager.Manager):
# multiple instances with an affinity policy could end up on different
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
- group_hint = scheduler_hints.get('group')
- if not group_hint:
- return
-
- # The RequestSpec stores scheduler_hints as key=list pairs so we need
- # to check the type on the value and pull the single entry out. The
- # API request schema validates that the 'group' hint is a single value.
- if isinstance(group_hint, list):
- group_hint = group_hint[0]
+ if scheduler_hints is not None:
+ # only go through here if scheduler_hints is provided, even if it
+ # is empty.
+ group_hint = scheduler_hints.get('group')
+ if not group_hint:
+ return
+ else:
+ # The RequestSpec stores scheduler_hints as key=list pairs so
+ # we need to check the type on the value and pull the single
+ # entry out. The API request schema validates that
+ # the 'group' hint is a single value.
+ if isinstance(group_hint, list):
+ group_hint = group_hint[0]
+
+ group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ else:
+ # TODO(ganso): a call to DB can be saved by adding request_spec
+ # to rpcapi payload of live_migration, pre_live_migration and
+ # check_can_live_migrate_destination
+ try:
+ group = objects.InstanceGroup.get_by_instance_uuid(
+ context, instance.uuid)
+ except exception.InstanceGroupNotFound:
+ return
- @utils.synchronized(group_hint)
- def _do_validation(context, instance, group_hint):
- group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ @utils.synchronized(group['uuid'])
+ def _do_validation(context, instance, group):
if group.policy and 'anti-affinity' == group.policy:
+
+ # instances on host
instances_uuids = objects.InstanceList.get_uuids_by_host(
context, self.host)
ins_on_host = set(instances_uuids)
+
+            # instance param is just for logging; the nodename obtained is
+ # not actually related to the instance at all
+ nodename = self._get_nodename(instance)
+
+ # instances being migrated to host
+ migrations = (
+ objects.MigrationList.get_in_progress_by_host_and_node(
+ context, self.host, nodename))
+ migration_vm_uuids = set([mig['instance_uuid']
+ for mig in migrations])
+
+ total_instances = migration_vm_uuids | ins_on_host
+
+ # refresh group to get updated members within locked block
+ group = objects.InstanceGroup.get_by_uuid(context,
+ group['uuid'])
members = set(group.members)
# Determine the set of instance group members on this host
# which are not the instance in question. This is used to
# determine how many other members from the same anti-affinity
# group can be on this host.
- members_on_host = ins_on_host & members - set([instance.uuid])
+ members_on_host = (total_instances & members -
+ set([instance.uuid]))
rules = group.rules
if rules and 'max_server_per_host' in rules:
max_server = rules['max_server_per_host']
@@ -1623,6 +1668,12 @@ class ComputeManager(manager.Manager):
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
+
+ # NOTE(ganso): The check for affinity below does not work and it
+ # can easily be violated because the lock happens in different
+ # compute hosts.
+ # The only fix seems to be a DB lock to perform the check whenever
+ # setting the host field to an instance.
elif group.policy and 'affinity' == group.policy:
group_hosts = group.get_hosts(exclude=[instance.uuid])
if group_hosts and self.host not in group_hosts:
@@ -1631,8 +1682,7 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid,
reason=msg)
- if not CONF.workarounds.disable_group_policy_check_upcall:
- _do_validation(context, instance, group_hint)
+ _do_validation(context, instance, group)
def _log_original_error(self, exc_info, instance_uuid):
LOG.error('Error: %s', exc_info[1], instance_uuid=instance_uuid,
@@ -4766,10 +4816,24 @@ class ComputeManager(manager.Manager):
with self._error_out_instance_on_exception(
context, instance, instance_state=instance_state),\
errors_out_migration_ctxt(migration):
+
self._send_prep_resize_notifications(
context, instance, fields.NotificationPhase.START,
instance_type)
try:
+ scheduler_hints = self._get_scheduler_hints(filter_properties,
+ request_spec)
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. At this point the migration is already
+ # in-progress, so this is the definitive moment to abort due to
+ # the policy violation. Also, exploding here is covered by the
+            # cleanup methods in the except block.
+ try:
+ self._validate_instance_group_policy(context, instance,
+ scheduler_hints)
+ except exception.RescheduledException as e:
+ raise exception.InstanceFaultRollback(inner_exception=e)
+
self._prep_resize(context, image, instance,
instance_type, filter_properties,
node, migration, request_spec,
@@ -6455,9 +6519,33 @@ class ComputeManager(manager.Manager):
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance,
new_attachment_id):
- """Swap volume for an instance."""
- context = context.elevated()
+ """Replace the old volume with the new volume within the active server
+ :param context: User request context
+ :param old_volume_id: Original volume id
+ :param new_volume_id: New volume id being swapped to
+ :param instance: Instance with original_volume_id attached
+ :param new_attachment_id: ID of the new attachment for new_volume_id
+ """
+ @utils.synchronized(instance.uuid)
+ def _do_locked_swap_volume(context, old_volume_id, new_volume_id,
+ instance, new_attachment_id):
+ self._do_swap_volume(context, old_volume_id, new_volume_id,
+ instance, new_attachment_id)
+ _do_locked_swap_volume(context, old_volume_id, new_volume_id, instance,
+ new_attachment_id)
+
+ def _do_swap_volume(self, context, old_volume_id, new_volume_id,
+ instance, new_attachment_id):
+ """Replace the old volume with the new volume within the active server
+
+ :param context: User request context
+ :param old_volume_id: Original volume id
+ :param new_volume_id: New volume id being swapped to
+ :param instance: Instance with original_volume_id attached
+ :param new_attachment_id: ID of the new attachment for new_volume_id
+ """
+ context = context.elevated()
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationPhase.START,
@@ -6780,6 +6868,20 @@ class ComputeManager(manager.Manager):
:param limits: objects.SchedulerLimits object for this live migration.
:returns: a LiveMigrateData object (hypervisor-dependent)
"""
+
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. This check at this moment is not very accurate, as
+ # multiple requests may be happening concurrently and miss the lock,
+ # but when it works it provides a better user experience by failing
+        # earlier. Also, it should be safe to explode here; the error becomes
+        # NoValidHost and the instance status remains ACTIVE.
+ try:
+ self._validate_instance_group_policy(ctxt, instance)
+ except exception.RescheduledException as e:
+ msg = ("Failed to validate instance group policy "
+ "due to: {}".format(e))
+ raise exception.MigrationPreCheckError(reason=msg)
+
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
@@ -6801,15 +6903,18 @@ class ComputeManager(manager.Manager):
LOG.info('Destination was ready for NUMA live migration, '
'but source is either too old, or is set to an '
'older upgrade level.', instance=instance)
- # Create migrate_data vifs
- migrate_data.vifs = \
- migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
- instance.get_network_info())
- # Claim PCI devices for VIFs on destination (if needed)
- port_id_to_pci = self._claim_pci_for_instance_vifs(ctxt, instance)
- # Update migrate VIFs with the newly claimed PCI devices
- self._update_migrate_vifs_profile_with_pci(migrate_data.vifs,
- port_id_to_pci)
+ if self.network_api.supports_port_binding_extension(ctxt):
+ # Create migrate_data vifs
+ migrate_data.vifs = \
+ migrate_data_obj.\
+ VIFMigrateData.create_skeleton_migrate_vifs(
+ instance.get_network_info())
+ # Claim PCI devices for VIFs on destination (if needed)
+ port_id_to_pci = self._claim_pci_for_instance_vifs(
+ ctxt, instance)
+ # Update migrate VIFs with the newly claimed PCI devices
+ self._update_migrate_vifs_profile_with_pci(
+ migrate_data.vifs, port_id_to_pci)
finally:
self.driver.cleanup_live_migration_destination_check(ctxt,
dest_check_data)
@@ -6915,6 +7020,13 @@ class ComputeManager(manager.Manager):
"""
LOG.debug('pre_live_migration data is %s', migrate_data)
+ # Error out if this host cannot accept the new instance due
+ # to anti-affinity. At this point the migration is already in-progress,
+ # so this is the definitive moment to abort due to the policy
+ # violation. Also, it should be safe to explode here. The instance
+ # status remains ACTIVE, migration status failed.
+ self._validate_instance_group_policy(context, instance)
+
migrate_data.old_vol_attachment_ids = {}
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -6977,8 +7089,12 @@ class ComputeManager(manager.Manager):
# determine if it should wait for a 'network-vif-plugged' event
# from neutron before starting the actual guest transfer in the
# hypervisor
+ using_multiple_port_bindings = (
+ 'vifs' in migrate_data and migrate_data.vifs)
migrate_data.wait_for_vif_plugged = (
- CONF.compute.live_migration_wait_for_vif_plug)
+ CONF.compute.live_migration_wait_for_vif_plug and
+ using_multiple_port_bindings
+ )
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
@@ -7038,8 +7154,8 @@ class ComputeManager(manager.Manager):
# We don't generate events if CONF.vif_plugging_timeout=0
# meaning that the operator disabled using them.
if CONF.vif_plugging_timeout and utils.is_neutron():
- return [('network-vif-plugged', vif['id'])
- for vif in instance.get_network_info()]
+ return (instance.get_network_info()
+ .get_live_migration_plug_time_events())
else:
return []
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index fccebabb37..6713f61d47 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -946,10 +946,16 @@ on compute host B.
The configured maximum is not enforced on shelved offloaded servers, as they
have no compute host.
+.. warning:: If this option is set to 0, the ``nova-compute`` service will fail
+ to start, as 0 disk devices is an invalid configuration that would
+ prevent instances from being able to boot.
+
Possible values:
* -1 means unlimited
-* Any integer >= 0 represents the maximum allowed
+* Any integer >= 1 represents the maximum allowed. A value of 0 will cause the
+ ``nova-compute`` service to fail to start, as 0 disk devices is an invalid
+ configuration that would prevent instances from being able to boot.
"""),
]
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index c9ed0f2e4b..20e4b5c9d6 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -266,6 +266,30 @@ Related options:
* :oslo.config:option:`image_cache_subdirectory_name`
* :oslo.config:option:`update_resources_interval`
"""),
+ cfg.BoolOpt(
+ 'never_download_image_if_on_rbd',
+ default=False,
+ help="""
+When booting from an image on a ceph-backed compute node, if the image does not
+already reside on the ceph cluster (as would be the case if glance is
+also using the same cluster), nova will download the image from glance and
+upload it to ceph itself. If using multiple ceph clusters, this may cause nova
+to unintentionally duplicate the image in a non-COW-able way in the local
+ceph deployment, wasting space.
+
+For more information, refer to the bug report:
+
+https://bugs.launchpad.net/nova/+bug/1858877
+
+Enabling this option will cause nova to *refuse* to boot an instance if it
+would require downloading the image from glance and uploading it to ceph
+itself.
+
+Related options:
+
+* ``compute_driver`` (libvirt)
+* ``[libvirt]/images_type`` (rbd)
+"""),
]
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index e13b3c0fe1..7641a7cc08 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -19,6 +19,7 @@ Leverages websockify.py by Joel Martin
'''
import copy
+import os
import socket
import sys
@@ -305,6 +306,22 @@ class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
# Fall back to the websockify <= v0.8.0 'socket' method location.
return websockify.WebSocketServer.socket(*args, **kwargs)
+ def send_head(self):
+ # This code is copied from this example patch:
+ # https://bugs.python.org/issue32084#msg306545
+ path = self.translate_path(self.path)
+ if os.path.isdir(path):
+ parts = urlparse.urlsplit(self.path)
+ if not parts.path.endswith('/'):
+ # Browsers interpret "Location: //uri" as an absolute URI
+ # like "http://URI"
+ if self.path.startswith('//'):
+ self.send_error(400,
+ "URI must not start with //")
+ return None
+
+ return super(NovaProxyRequestHandler, self).send_head()
+
class NovaWebSocketProxy(websockify.WebSocketProxy):
def __init__(self, *args, **kwargs):
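The send_head() override above guards against an open redirect: the stock SimpleHTTPRequestHandler answers a directory request with a redirect built from the client-supplied path, and a path that begins with // turns that Location header into a scheme-relative URL pointing at another host. A small standard-library illustration with placeholder hostnames (not part of the patch):

# Why a request path beginning with "//" has to be rejected up front.
from urllib.parse import urljoin, urlsplit

print(urlsplit('//evil.example/console'))
# SplitResult(scheme='', netloc='evil.example', path='/console',
#             query='', fragment='')

# Resolving a "Location: //evil.example/console" header against the
# proxy's own URL lands on a different host entirely (an open redirect).
print(urljoin('https://novnc.example:6080/', '//evil.example/console'))
# https://evil.example/console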
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 02311f240d..d86b253790 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -48,6 +48,7 @@ from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import noload
+from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
@@ -1930,13 +1931,27 @@ def _build_instance_get(context, columns_to_join=None):
continue
if 'extra.' in column:
query = query.options(undefer(column))
+ elif column in ['metadata', 'system_metadata']:
+ # NOTE(melwitt): We use subqueryload() instead of joinedload() for
+ # metadata and system_metadata because of the one-to-many
+ # relationship of the data. Directly joining these columns can
+ # result in a large number of additional rows being queried if an
+ # instance has a large number of (system_)metadata items, resulting
+ # in a large data transfer. Instead, the subqueryload() will
+ # perform additional queries to obtain metadata and system_metadata
+ # for the instance.
+ query = query.options(subqueryload(column))
else:
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
- return query
+ # NOTE(melwitt): We need to use order_by(<unique column>) so that the
+ # additional queries emitted by subqueryload() include the same ordering as
+ # used by the parent query.
+ # https://docs.sqlalchemy.org/en/13/orm/loading_relationships.html#the-importance-of-ordering
+ return query.order_by(models.Instance.id)
def _instances_fill_metadata(context, instances, manual_joins=None):
@@ -5578,7 +5593,11 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
# NOTE(jake): instance_actions_events doesn't have a instance_uuid column
# but still needs to be archived as it is a FK constraint
if ((max_rows is None or rows_archived < max_rows) and
- ('instance_uuid' in columns or
+ # NOTE(melwitt): The pci_devices table uses the 'instance_uuid'
+ # column to track the allocated association of a PCI device and its
+ # records are not tied to the lifecycles of instance records.
+ (tablename != 'pci_devices' and
+ 'instance_uuid' in columns or
tablename == 'instance_actions_events')):
instances = models.BASE.metadata.tables['instances']
limit = max_rows - rows_archived if max_rows is not None else None
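The loader-strategy change above trades one wide JOIN for an extra SELECT per relationship, and the new order_by() keeps that extra SELECT aligned with the parent query. A self-contained SQLAlchemy 1.x sketch with toy models (illustrative names only, not Nova's real schema):

# Toy instance/metadata mapping to contrast the two loader strategies.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, joinedload, relationship, subqueryload

Base = declarative_base()


class Instance(Base):
    __tablename__ = 'instances'
    id = Column(Integer, primary_key=True)
    metadata_items = relationship('InstanceMetadata')


class InstanceMetadata(Base):
    __tablename__ = 'instance_metadata'
    id = Column(Integer, primary_key=True)
    instance_id = Column(Integer, ForeignKey('instances.id'))
    key = Column(String(255))
    value = Column(String(255))


engine = create_engine('sqlite://', echo=True)  # echo shows the emitted SQL
Base.metadata.create_all(engine)
session = Session(bind=engine)

# joinedload(): a single LEFT OUTER JOIN, so the instance row is repeated
# once per metadata item -- the data-transfer blow-up the change avoids.
session.query(Instance).options(joinedload('metadata_items')).all()

# subqueryload(): a second SELECT for the children. The parent query needs
# a deterministic ORDER BY (the primary key here) so that second SELECT
# lines up with it -- hence the new order_by(models.Instance.id).
session.query(Instance).options(
    subqueryload('metadata_items')).order_by(Instance.id).all()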
diff --git a/nova/exception.py b/nova/exception.py
index 7a5c523933..d9533fe4c3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -620,6 +620,11 @@ class ImageBadRequest(Invalid):
"%(response)s")
+class ImageQuotaExceeded(NovaException):
+ msg_fmt = _("Quota exceeded or out of space for image %(image_id)s "
+ "in the image service.")
+
+
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
@@ -2541,7 +2546,7 @@ class PMEMNamespaceConfigInvalid(NovaException):
"please check your conf file. ")
-class GetPMEMNamespaceFailed(NovaException):
+class GetPMEMNamespacesFailed(NovaException):
msg_fmt = _("Get PMEM namespaces on host failed: %(reason)s.")
diff --git a/nova/image/glance.py b/nova/image/glance.py
index b2c5f9ae05..5c25bd4e41 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -938,6 +938,8 @@ def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.ImageBadRequest(image_id=image_id,
response=six.text_type(exc_value))
+ if isinstance(exc_value, glanceclient.exc.HTTPOverLimit):
+ return exception.ImageQuotaExceeded(image_id=image_id)
return exc_value
diff --git a/nova/network/model.py b/nova/network/model.py
index d8119fae72..7ed9d2d1b8 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -469,6 +469,14 @@ class VIF(Model):
return (self.is_hybrid_plug_enabled() and not
migration.is_same_host())
+ @property
+ def has_live_migration_plug_time_event(self):
+ """Returns whether this VIF's network-vif-plugged external event will
+ be sent by Neutron at "plugtime" - in other words, as soon as neutron
+ completes configuring the network backend.
+ """
+ return self.is_hybrid_plug_enabled()
+
def is_hybrid_plug_enabled(self):
return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False)
@@ -530,15 +538,22 @@ class NetworkInfo(list):
return jsonutils.dumps(self)
def get_bind_time_events(self, migration):
- """Returns whether any of our VIFs have "bind-time" events. See
- has_bind_time_event() docstring for more details.
+ """Returns a list of external events for any VIFs that have
+ "bind-time" events during cold migration.
"""
return [('network-vif-plugged', vif['id'])
for vif in self if vif.has_bind_time_event(migration)]
+ def get_live_migration_plug_time_events(self):
+ """Returns a list of external events for any VIFs that have
+ "plug-time" events during live migration.
+ """
+ return [('network-vif-plugged', vif['id'])
+ for vif in self if vif.has_live_migration_plug_time_event]
+
def get_plug_time_events(self, migration):
- """Complementary to get_bind_time_events(), any event that does not
- fall in that category is a plug-time event.
+ """Returns a list of external events for any VIFs that have
+ "plug-time" events during cold migration.
"""
return [('network-vif-plugged', vif['id'])
for vif in self if not vif.has_bind_time_event(migration)]
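With the helper added above, only VIFs that rely on OVS hybrid plug report a plug-time network-vif-plugged event for live migration. A short sketch exercising nova.network.model directly (assumes a Nova checkout on the path; port IDs are made up), mirroring the unit tests later in this change:

# Only the hybrid-plug VIF should yield a plug-time event.
from nova.network import model as network_model

vifs = network_model.NetworkInfo([
    network_model.VIF(
        'port-hybrid',
        details={network_model.VIF_DETAILS_OVS_HYBRID_PLUG: True}),
    network_model.VIF(
        'port-native',
        details={network_model.VIF_DETAILS_OVS_HYBRID_PLUG: False}),
])

print(vifs.get_live_migration_plug_time_events())
# [('network-vif-plugged', 'port-hybrid')]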
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 8f85a748f3..8508e63d78 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -747,9 +747,15 @@ class API(base_api.NetworkAPI):
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
+            # NOTE(slaweq): fields other than name and id aren't really
+            # needed, so asking only for those fields allows Neutron to skip
+            # preparing the list of rules for each security group it finds.
+            # That may speed up processing of this request a lot when the
+            # tenant has many security groups.
+ sg_fields = ['id', 'name']
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
- **search_opts).get('security_groups')
+ fields=sg_fields, **search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
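The fields=['id', 'name'] restriction above maps directly onto the Neutron API's field selection. A hedged sketch of the equivalent call made with python-neutronclient on its own; the endpoint, credentials and tenant ID are placeholders:

# Placeholder credentials and IDs throughout -- adjust for a real cloud.
from keystoneauth1 import loading, session
from neutronclient.v2_0 import client as neutron_client

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
    auth_url='http://keystone.example:5000/v3',
    username='demo', password='secret', project_name='demo',
    user_domain_id='default', project_domain_id='default')
neutron = neutron_client.Client(session=session.Session(auth=auth))

# Asking only for 'id' and 'name' lets Neutron skip assembling the full
# rule list for every security group owned by the tenant.
groups = neutron.list_security_groups(
    fields=['id', 'name'],
    tenant_id='b04b1494d0c64e2fa75b3a0bcb86d4b7',
).get('security_groups')
print(groups)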
diff --git a/nova/network/neutronv2/constants.py b/nova/network/neutronv2/constants.py
index 1bb4605c9f..6a5e6b8e0e 100644
--- a/nova/network/neutronv2/constants.py
+++ b/nova/network/neutronv2/constants.py
@@ -19,6 +19,7 @@ VNIC_INDEX_EXT = 'VNIC Index'
DNS_INTEGRATION = 'DNS Integration'
MULTI_NET_EXT = 'Multi Provider Network'
SUBSTR_PORT_FILTERING = 'IP address substring filtering'
+PORT_BINDING = 'Port Binding'
PORT_BINDING_EXTENDED = 'Port Bindings Extended'
LIVE_MIGRATION = 'live-migration'
DEFAULT_SECGROUP = 'default'
diff --git a/nova/objects/instance_mapping.py b/nova/objects/instance_mapping.py
index 0392f04770..8b5f6ba92e 100644
--- a/nova/objects/instance_mapping.py
+++ b/nova/objects/instance_mapping.py
@@ -15,6 +15,7 @@ import collections
from oslo_log import log as logging
from oslo_utils import versionutils
import six
+from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import false
from sqlalchemy.sql import func
@@ -161,8 +162,16 @@ class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
def save(self):
changes = self.obj_get_changes()
changes = self._update_with_cell_id(changes)
- db_mapping = self._save_in_db(self._context, self.instance_uuid,
- changes)
+ try:
+ db_mapping = self._save_in_db(self._context, self.instance_uuid,
+ changes)
+ except orm_exc.StaleDataError:
+ # NOTE(melwitt): If the instance mapping has been deleted out from
+ # under us by conductor (delete requested while booting), we will
+ # encounter a StaleDataError after we retrieved the row and try to
+ # update it after it's been deleted. We can treat this like an
+ # instance mapping not found and allow the caller to handle it.
+ raise exception.InstanceMappingNotFound(uuid=self.instance_uuid)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
diff --git a/nova/tests/functional/db/test_archive.py b/nova/tests/functional/db/test_archive.py
index 79edf0b96a..bc463c7ecc 100644
--- a/nova/tests/functional/db/test_archive.py
+++ b/nova/tests/functional/db/test_archive.py
@@ -137,6 +137,19 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
# Verify we have some system_metadata since we'll check that later.
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
+ # Create a pci_devices record to simulate an instance that had a PCI
+ # device allocated at the time it was deleted. There is a window of
+ # time between deletion of the instance record and freeing of the PCI
+ # device in nova-compute's _complete_deletion method during RT update.
+ db.pci_device_update(admin_context, 1, 'fake-address',
+ {'compute_node_id': 1,
+ 'address': 'fake-address',
+ 'vendor_id': 'fake',
+ 'product_id': 'fake',
+ 'dev_type': 'fake',
+ 'label': 'fake',
+ 'status': 'allocated',
+ 'instance_uuid': instance.uuid})
# Now try and archive the soft deleted records.
results, deleted_instance_uuids, archived = \
db.archive_deleted_rows(max_rows=100)
@@ -151,6 +164,8 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
self.assertEqual(sum(results.values()), archived)
+ # Verify that the pci_devices record has not been dropped
+ self.assertNotIn('pci_devices', results)
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
diff --git a/nova/tests/functional/regressions/test_bug_1888395.py b/nova/tests/functional/regressions/test_bug_1888395.py
new file mode 100644
index 0000000000..69576fd094
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1888395.py
@@ -0,0 +1,163 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+
+from lxml import etree
+import six.moves.urllib.parse as urlparse
+
+from nova import context
+from nova.network.neutronv2 import api as neutron
+from nova.network.neutronv2 import constants as neutron_constants
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base as libvirt_base
+from nova.tests.unit.virt.libvirt import fake_os_brick_connector
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova.virt.libvirt import guest as libvirt_guest
+
+
+class TestLiveMigrationWithoutMultiplePortBindings(
+ integrated_helpers.InstanceHelperMixin,
+ libvirt_base.ServersTestBase):
+ """Regression test for bug 1888395.
+
+ This regression test asserts that Live migration works when
+ neutron does not support the binding-extended api extension
+ and the legacy single port binding workflow is used.
+ """
+
+ ADMIN_API = True
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def list_extensions(self, *args, **kwargs):
+ return {
+ 'extensions': [
+ {
+ # Copied from neutron-lib portbindings.py
+ "updated": "2014-02-03T10:00:00-00:00",
+ "name": neutron_constants.PORT_BINDING,
+ "links": [],
+ "alias": "binding",
+ "description": "Expose port bindings of a virtual port to "
+ "external application"
+ }
+ ]
+ }
+
+ def setUp(self):
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+ super(TestLiveMigrationWithoutMultiplePortBindings, self).setUp()
+ self.neutron.list_extensions = self.list_extensions
+ self.neutron_api = neutron.API()
+ # TODO(sean-k-mooney): remove after
+ # I275509eb0e0eb9eaf26fe607b7d9a67e1edc71f8
+ # has merged.
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.connector',
+ fake_os_brick_connector))
+
+ self.computes = {}
+ for host in ['start_host', 'end_host']:
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=2,
+ kB_mem=10740000)
+ fake_connection = self._get_connection(
+ host_info=host_info, hostname=host)
+
+ # This is fun. Firstly we need to do a global'ish mock so we can
+ # actually start the service.
+ with mock.patch('nova.virt.libvirt.host.Host.get_connection',
+ return_value=fake_connection):
+ compute = self.start_service('compute', host=host)
+
+ # Once that's done, we need to do some tweaks to each individual
+ # compute "service" to make sure they return unique objects
+ compute.driver._host.get_connection = lambda: fake_connection
+ self.computes[host] = compute
+
+ self.ctxt = context.get_admin_context()
+ # TODO(sean-k-mooney): remove this when it is part of ServersTestBase
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
+ self._migrate_stub))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.migration._update_serial_xml',
+ self._update_serial_xml_stub))
+
+ def _update_serial_xml_stub(self, xml_doc, migrate_data):
+ return xml_doc
+
+ def _migrate_stub(self, domain, destination, params, flags):
+ """Stub out migrateToURI3."""
+
+ src_hostname = domain._connection.hostname
+ dst_hostname = urlparse.urlparse(destination).netloc
+
+ # In a real live migration, libvirt and QEMU on the source and
+ # destination talk it out, resulting in the instance starting to exist
+ # on the destination. Fakelibvirt cannot do that, so we have to
+ # manually create the "incoming" instance on the destination
+ # fakelibvirt.
+ dst = self.computes[dst_hostname]
+ dst.driver._host.get_connection().createXML(
+ params['destination_xml'],
+ 'fake-createXML-doesnt-care-about-flags')
+
+ src = self.computes[src_hostname]
+ conn = src.driver._host.get_connection()
+
+ # because migrateToURI3 is spawned in a background thread, this method
+ # does not block the upper nova layers. Because we don't want nova to
+ # think the live migration has finished until this method is done, the
+ # last thing we do is make fakelibvirt's Domain.jobStats() return
+ # VIR_DOMAIN_JOB_COMPLETED.
+ server = etree.fromstring(
+ params['destination_xml']
+ ).find('./uuid').text
+ dom = conn.lookupByUUIDString(server)
+ dom.complete_job()
+
+ @mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
+ def test_live_migrate(self, mock_get_job_info):
+ mock_get_job_info.return_value = libvirt_guest.JobInfo(
+ type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED)
+ flavors = self.api.get_flavors()
+ flavor = flavors[0]
+ server_req = self._build_minimal_create_server_request(
+ self.api, 'some-server', flavor_id=flavor['id'],
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ networks=[{'port': self.neutron.port_1['id']}])
+ server_req['availability_zone'] = 'nova:%s' % "start_host"
+ created_server = self.api.post_server({'server': server_req})
+ server = self._wait_for_state_change(
+ self.api, created_server, 'ACTIVE')
+ self.assertFalse(
+ self.neutron_api.supports_port_binding_extension(self.ctxt))
+ # TODO(sean-k-mooney): extend _live_migrate to support passing a host
+ self.api.post_server_action(
+ server['id'],
+ {
+ 'os-migrateLive': {
+ 'host': 'end_host',
+ 'block_migration': 'auto'
+ }
+ }
+ )
+
+ self._wait_for_server_parameter(
+ self.api, server,
+ {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
+
+ msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
+ self.assertNotIn(msg, self.stdlog.logger.output)
diff --git a/nova/tests/functional/test_monkey_patch.py b/nova/tests/functional/test_monkey_patch.py
new file mode 100644
index 0000000000..b471d333cf
--- /dev/null
+++ b/nova/tests/functional/test_monkey_patch.py
@@ -0,0 +1,45 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(artom) This file exists to test eventlet monkeypatching. How and what
+# eventlet monkeypatches can be controlled by environment variables that
+# are processed by eventlet at import-time (for example, EVENTLET_NO_GREENDNS).
+# Nova manages all of this in nova.monkey_patch. Therefore, nova.monkey_patch
+# must be the first thing to import eventlet. As nova.tests.functional.__init__
+# imports nova.monkey_patch, we're OK here.
+
+import socket
+import traceback
+
+from nova import test
+
+
+class TestMonkeyPatch(test.TestCase):
+
+ def test_greendns_is_disabled(self):
+ """Try to resolve a fake fqdn. If we see greendns mentioned in the
+ traceback of the raised exception, it means we've not actually disabled
+ greendns. See the TODO and NOTE in nova.monkey_patch to understand why
+ greendns needs to be disabled.
+ """
+ raised = False
+ try:
+ socket.gethostbyname('goat.fake')
+ except Exception:
+ tb = traceback.format_exc()
+ # NOTE(artom) If we've correctly disabled greendns, we expect the
+ # traceback to not contain any reference to it.
+ self.assertNotIn('greendns.py', tb)
+ raised = True
+ self.assertTrue(raised)
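As the file header above notes, eventlet reads variables such as EVENTLET_NO_GREENDNS at import time, so they only take effect if they are set before the first eventlet import anywhere in the process. A minimal standalone sketch of that ordering constraint (not how nova.monkey_patch itself is written):

# The variable must be in the environment before eventlet is imported
# for the first time; setting it later has no effect.
import os
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

import eventlet  # noqa: E402  deliberately imported after the setenv
eventlet.monkey_patch()

import socket  # noqa: E402

# With greendns disabled, name resolution uses the standard resolver
# rather than eventlet's green DNS implementation.
print(socket.gethostbyname('localhost'))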
diff --git a/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova/tests/unit/api/openstack/compute/test_shelve.py
index 8d41490d86..e51729935b 100644
--- a/nova/tests/unit/api/openstack/compute/test_shelve.py
+++ b/nova/tests/unit/api/openstack/compute/test_shelve.py
@@ -54,6 +54,7 @@ class ShelvePolicyTestV21(test.NoDBTestCase):
self.req.environ['nova.context'],
vm_state=vm_states.ACTIVE, task_state=None)
instance.launched_at = instance.created_at
+ instance.system_metadata = {}
get_instance_mock.return_value = instance
mock_save.side_effect = exception.UnexpectedTaskStateError(
instance_uuid=instance.uuid, expected=None,
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 77f23c30fe..dc3a49e5a1 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -18,6 +18,7 @@
"""Tests for compute service."""
import datetime
+import fixtures as std_fixtures
from itertools import chain
import operator
import sys
@@ -6142,9 +6143,15 @@ class ComputeTestCase(BaseTestCase,
# Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self)
+
if CONF.use_neutron:
self.stub_out(
'nova.network.neutronv2.api.API.get_instance_nw_info', stupid)
+ self.useFixture(
+ std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.'
+ 'API.supports_port_binding_extension',
+ lambda *args: True))
else:
self.stub_out('nova.network.api.API.get_instance_nw_info', stupid)
@@ -6155,6 +6162,9 @@ class ComputeTestCase(BaseTestCase,
fake_notifier.NOTIFICATIONS = []
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False)
+ vifs = migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
+ stupid())
+ migrate_data.vifs = vifs
mock_pre.return_value = migrate_data
with mock.patch.object(self.compute.network_api,
@@ -9133,6 +9143,7 @@ class ComputeAPITestCase(BaseTestCase):
'image_ramdisk_id': uuids.ramdisk_id,
'image_something_else': 'meow',
'preserved': 'preserve this!',
+ 'image_base_image_ref': image_ref,
'boot_roles': ''},
sys_meta)
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 67dc20f6bd..526302de8a 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -15,6 +15,7 @@
import contextlib
import copy
import datetime
+import fixtures as std_fixtures
import time
from cinderclient import exceptions as cinder_exception
@@ -963,6 +964,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_error_interrupted.assert_called_once_with(
self.context, {active_instance.uuid, evacuating_instance.uuid})
+ def test_init_host_disk_devices_configuration_failure(self):
+ self.flags(max_disk_devices_to_attach=0, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute.init_host)
+
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
@@ -3190,22 +3196,80 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_event.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_destination',
CONF.host, instance.uuid)
+ return result
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_success(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: True))
self._test_check_can_live_migrate_destination()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_fail(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: True))
self.assertRaises(
- test.TestingException,
- self._test_check_can_live_migrate_destination,
- do_raise=True)
-
+ test.TestingException,
+ self._test_check_can_live_migrate_destination,
+ do_raise=True)
+
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
+ def test_check_can_live_migrate_destination_contains_vifs(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: True))
+ migrate_data = self._test_check_can_live_migrate_destination()
+ self.assertIn('vifs', migrate_data)
+ self.assertIsNotNone(migrate_data.vifs)
+
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
+ def test_check_can_live_migrate_destination_no_binding_extended(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: False))
+ migrate_data = self._test_check_can_live_migrate_destination()
+ self.assertNotIn('vifs', migrate_data)
+
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_false(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=False)
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_true(self):
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutronv2.api.API.supports_port_binding_extension',
+ lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=True)
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def test_check_can_live_migrate_destination_fail_group_policy(
+ self, mock_fail_db):
+
+ instance = fake_instance.fake_instance_obj(
+ self.context, host=self.compute.host, vm_state=vm_states.ACTIVE,
+ node='fake-node')
+
+ ex = exception.RescheduledException(
+ instance_uuid=instance.uuid, reason="policy violated")
+
+ with mock.patch.object(self.compute, '_validate_instance_group_policy',
+ side_effect=ex):
+ self.assertRaises(
+ exception.MigrationPreCheckError,
+ self.compute.check_can_live_migrate_destination,
+ self.context, instance, None, None, None, None)
+
def test_dest_can_numa_live_migrate(self):
positive_dest_check_data = objects.LibvirtLiveMigrateData(
dst_supports_numa_live_migration=True)
@@ -6945,7 +7009,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def test_validate_policy_honors_workaround_disabled(self, mock_get):
instance = objects.Instance(uuid=uuids.instance)
hints = {'group': 'foo'}
- mock_get.return_value = objects.InstanceGroup(policy=None)
+ mock_get.return_value = objects.InstanceGroup(policy=None,
+ uuid=uuids.group)
self.compute._validate_instance_group_policy(self.context,
instance, hints)
mock_get.assert_called_once_with(self.context, 'foo')
@@ -6971,10 +7036,14 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance, hints)
mock_get.assert_called_once_with(self.context, uuids.group_hint)
+ @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.objects.InstanceGroup.get_by_hint')
- def test_validate_instance_group_policy_with_rules(self, mock_get_by_hint,
- mock_get_by_host):
+ @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
+ def test_validate_instance_group_policy_with_rules(
+ self, migration_list, nodes, mock_get_by_hint, mock_get_by_host,
+ mock_get_by_uuid):
# Create 2 instance in same host, inst2 created before inst1
instance = objects.Instance(uuid=uuids.inst1)
hints = {'group': [uuids.group_hint]}
@@ -6983,17 +7052,26 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_by_host.return_value = existing_insts
# if group policy rules limit to 1, raise RescheduledException
- mock_get_by_hint.return_value = objects.InstanceGroup(
+ group = objects.InstanceGroup(
policy='anti-affinity', rules={'max_server_per_host': '1'},
- hosts=['host1'], members=members_uuids)
+ hosts=['host1'], members=members_uuids,
+ uuid=uuids.group)
+ mock_get_by_hint.return_value = group
+ mock_get_by_uuid.return_value = group
+ nodes.return_value = ['nodename']
+ migration_list.return_value = [objects.Migration(
+ uuid=uuids.migration, instance_uuid=uuids.instance)]
self.assertRaises(exception.RescheduledException,
self.compute._validate_instance_group_policy,
self.context, instance, hints)
# if group policy rules limit change to 2, validate OK
- mock_get_by_hint.return_value = objects.InstanceGroup(
+ group2 = objects.InstanceGroup(
policy='anti-affinity', rules={'max_server_per_host': 2},
- hosts=['host1'], members=members_uuids)
+ hosts=['host1'], members=members_uuids,
+ uuid=uuids.group)
+ mock_get_by_hint.return_value = group2
+ mock_get_by_uuid.return_value = group2
self.compute._validate_instance_group_policy(self.context,
instance, hints)
@@ -8520,6 +8598,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
manager.ComputeManager()
mock_executor.assert_called_once_with()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_cinder_v3_api(self):
# This tests that pre_live_migration with a bdm with an
# attachment_id, will create a new attachment and update
@@ -8597,6 +8677,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
_test()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_exception_cinder_v3_api(self):
# The instance in this test has 2 attachments. The second attach_create
# will throw an exception. This will test that the first attachment
@@ -8666,6 +8748,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertGreater(len(m.mock_calls), 0)
_test()
+ @mock.patch('nova.objects.InstanceGroup.get_by_instance_uuid', mock.Mock(
+ side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_pre_live_migration_exceptions_delete_attachments(self):
# The instance in this test has 2 attachments. The call to
# driver.pre_live_migration will raise an exception. This will test
@@ -8747,6 +8831,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.flags(vif_plugging_timeout=300, use_neutron=True)
self.assertEqual(
[], self.compute._get_neutron_events_for_live_migration([]))
+ # 4. no plug time events
+ with mock.patch.object(self.instance, 'get_network_info') as nw_info:
+ nw_info.return_value = network_model.NetworkInfo(
+ [network_model.VIF(
+ uuids.port1, details={
+ network_model.VIF_DETAILS_OVS_HYBRID_PLUG: False})])
+ self.assertFalse(nw_info.return_value[0].is_hybrid_plug_enabled())
+ self.assertEqual(
+ [], self.compute._get_neutron_events_for_live_migration(
+ self.instance))
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
@mock.patch('nova.compute.manager.ComputeManager._post_live_migration')
@@ -8761,9 +8855,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
wait_for_vif_plugged=True)
mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
mock_pre_live_mig.return_value = migrate_data
+ details = {network_model.VIF_DETAILS_OVS_HYBRID_PLUG: True}
self.instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
- network_model.VIF(uuids.port1), network_model.VIF(uuids.port2)
+ network_model.VIF(uuids.port1, details=details),
+ network_model.VIF(uuids.port2, details=details)
]))
self.compute._waiting_live_migrations[self.instance.uuid] = (
self.migration, mock.MagicMock()
@@ -8793,11 +8889,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
of not waiting.
"""
migrate_data = objects.LibvirtLiveMigrateData()
+ details = {network_model.VIF_DETAILS_OVS_HYBRID_PLUG: True}
mock_get_bdms.return_value = objects.BlockDeviceMappingList(objects=[])
mock_pre_live_mig.return_value = migrate_data
self.instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
- network_model.VIF(uuids.port1)]))
+ network_model.VIF(uuids.port1, details=details)]))
self.compute._waiting_live_migrations[self.instance.uuid] = (
self.migration, mock.MagicMock()
)
@@ -8941,9 +9038,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock_get_bdms.return_value = source_bdms
migrate_data = objects.LibvirtLiveMigrateData(
wait_for_vif_plugged=True)
+ details = {network_model.VIF_DETAILS_OVS_HYBRID_PLUG: True}
self.instance.info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo([
- network_model.VIF(uuids.port1), network_model.VIF(uuids.port2)
+ network_model.VIF(uuids.port1, details=details),
+ network_model.VIF(uuids.port2, details=details)
]))
self.compute._waiting_live_migrations = {}
fake_migration = objects.Migration(
@@ -10002,6 +10101,54 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
# (_error_out_instance_on_exception will set to ACTIVE by default).
self.assertEqual(vm_states.STOPPED, instance.vm_state)
+ @mock.patch('nova.compute.utils.notify_usage_exists')
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_notify_about_instance_usage')
+ @mock.patch('nova.compute.utils.notify_about_resize_prep_instance')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.compute.manager.ComputeManager._revert_allocation')
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_reschedule_resize_or_reraise')
+ @mock.patch('nova.compute.utils.add_instance_fault_from_exc')
+ # this is almost copy-paste from test_prep_resize_fails_rollback
+ def test_prep_resize_fails_group_validation(
+ self, add_instance_fault_from_exc, _reschedule_resize_or_reraise,
+ _revert_allocation, mock_instance_save,
+ notify_about_resize_prep_instance, _notify_about_instance_usage,
+ notify_usage_exists):
+ """Tests that if _validate_instance_group_policy raises
+ InstanceFaultRollback, the instance.vm_state is reset properly in
+ _error_out_instance_on_exception
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, host=self.compute.host, vm_state=vm_states.STOPPED,
+ node='fake-node', expected_attrs=['system_metadata', 'flavor'])
+ migration = mock.MagicMock(spec='nova.objects.Migration')
+ request_spec = mock.MagicMock(spec='nova.objects.RequestSpec')
+ ex = exception.RescheduledException(
+ instance_uuid=instance.uuid, reason="policy violated")
+ ex2 = exception.InstanceFaultRollback(
+ inner_exception=ex)
+
+ def fake_reschedule_resize_or_reraise(*args, **kwargs):
+ raise ex2
+
+ _reschedule_resize_or_reraise.side_effect = (
+ fake_reschedule_resize_or_reraise)
+
+ with mock.patch.object(self.compute, '_validate_instance_group_policy',
+ side_effect=ex):
+ self.assertRaises(
+ # _error_out_instance_on_exception should reraise the
+ # RescheduledException inside InstanceFaultRollback.
+ exception.RescheduledException, self.compute.prep_resize,
+ self.context, instance.image_meta, instance, instance.flavor,
+ request_spec, filter_properties={}, node=instance.node,
+ clean_shutdown=True, migration=migration, host_list=[])
+ # The instance.vm_state should remain unchanged
+ # (_error_out_instance_on_exception will set to ACTIVE by default).
+ self.assertEqual(vm_states.STOPPED, instance.vm_state)
+
@mock.patch('nova.compute.rpcapi.ComputeAPI.resize_instance')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index 98e162d59c..27677bbf78 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -15,6 +15,7 @@
"""Tests for nova websocketproxy."""
import copy
+import io
import socket
import mock
@@ -626,6 +627,70 @@ class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
self.wh.server.top_new_client(conn, address)
self.assertIsNone(self.wh._compute_rpcapi)
+ def test_reject_open_redirect(self):
+ # This will test the behavior when an attempt is made to cause an open
+ # redirect. It should be rejected.
+ mock_req = mock.MagicMock()
+ mock_req.makefile().readline.side_effect = [
+ b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ b''
+ ]
+
+ client_addr = ('8.8.8.8', 54321)
+ mock_server = mock.MagicMock()
+ # This specifies that the server will be able to handle requests other
+ # than only websockets.
+ mock_server.only_upgrade = False
+
+ # Constructing a handler will process the mock_req request passed in.
+ handler = websocketproxy.NovaProxyRequestHandler(
+ mock_req, client_addr, mock_server)
+
+ # Collect the response data to verify at the end. The
+ # SimpleHTTPRequestHandler writes the response data to a 'wfile'
+ # attribute.
+ output = io.BytesIO()
+ handler.wfile = output
+ # Process the mock_req again to do the capture.
+ handler.do_GET()
+ output.seek(0)
+ result = output.readlines()
+
+ # Verify no redirect happens and instead a 400 Bad Request is returned.
+ self.assertIn('400 URI must not start with //', result[0].decode())
+
+ def test_reject_open_redirect_3_slashes(self):
+ # This will test the behavior when an attempt is made to cause an open
+ # redirect. It should be rejected.
+ mock_req = mock.MagicMock()
+ mock_req.makefile().readline.side_effect = [
+ b'GET ///example.com/%2F.. HTTP/1.1\r\n',
+ b''
+ ]
+
+ client_addr = ('8.8.8.8', 54321)
+ mock_server = mock.MagicMock()
+ # This specifies that the server will be able to handle requests other
+ # than only websockets.
+ mock_server.only_upgrade = False
+
+ # Constructing a handler will process the mock_req request passed in.
+ handler = websocketproxy.NovaProxyRequestHandler(
+ mock_req, client_addr, mock_server)
+
+ # Collect the response data to verify at the end. The
+ # SimpleHTTPRequestHandler writes the response data to a 'wfile'
+ # attribute.
+ output = io.BytesIO()
+ handler.wfile = output
+ # Process the mock_req again to do the capture.
+ handler.do_GET()
+ output.seek(0)
+ result = output.readlines()
+
+ # Verify no redirect happens and instead a 400 Bad Request is returned.
+ self.assertIn('400 URI must not start with //', result[0].decode())
+
class NovaWebsocketSecurityProxyTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index fb42d1af52..76f301e786 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -2086,6 +2086,14 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+ def test_instance_get_with_meta(self):
+ inst_id = self.create_instance_with_args().id
+ inst = db.instance_get(self.ctxt, inst_id)
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index c5e714c1b0..59f5710d49 100644
--- a/nova/tests/unit/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
@@ -305,6 +305,11 @@ class TestExceptionTranslations(test.NoDBTestCase):
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
+ def test_client_httpoverlimit_converts_to_imagequotaexceeded(self):
+ in_exc = glanceclient.exc.HTTPOverLimit('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageQuotaExceeded)
+
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
diff --git a/nova/tests/unit/objects/test_instance_mapping.py b/nova/tests/unit/objects/test_instance_mapping.py
index ec50517a20..2c877c0a1f 100644
--- a/nova/tests/unit/objects/test_instance_mapping.py
+++ b/nova/tests/unit/objects/test_instance_mapping.py
@@ -12,6 +12,7 @@
import mock
from oslo_utils import uuidutils
+from sqlalchemy.orm import exc as orm_exc
from nova import exception
from nova import objects
@@ -151,6 +152,14 @@ class _TestInstanceMappingObject(object):
comparators={
'cell_mapping': self._check_cell_map_value})
+ @mock.patch.object(instance_mapping.InstanceMapping, '_save_in_db')
+ def test_save_stale_data_error(self, save_in_db):
+ save_in_db.side_effect = orm_exc.StaleDataError
+ mapping_obj = objects.InstanceMapping(self.context)
+ mapping_obj.instance_uuid = uuidutils.generate_uuid()
+
+ self.assertRaises(exception.InstanceMappingNotFound, mapping_obj.save)
+
@mock.patch.object(instance_mapping.InstanceMapping, '_destroy_in_db')
def test_destroy(self, destroy_in_db):
uuid = uuidutils.generate_uuid()
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index e942b3a073..186c8383a0 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -1007,7 +1007,7 @@ class HackingTestCase(test.NoDBTestCase):
expected_errors=errors, filename="nova/tests/unit/test_context.py")
# Check no errors in other than 'nova/tests' directory.
self._assert_has_no_errors(
- code, checks.nonexistent_assertion_methods_and_attributes,
+ code, checks.useless_assertion,
filename="nova/compute/api.py")
code = """
self.assertIsNone(None_test_var, "Fails")
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index 7a6ecf3af2..c14544ba15 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -329,12 +329,14 @@ class MetadataTestCase(test.TestCase):
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None,
'default_ephemeral_device': None,
- 'default_swap_device': None})
+ 'default_swap_device': None,
+ 'context': self.context})
instance_ref1 = objects.Instance(**{'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1',
'default_ephemeral_device': None,
- 'default_swap_device': None})
+ 'default_swap_device': None,
+ 'context': self.context})
def fake_bdm_get(ctxt, uuid):
return [fake_block_device.FakeDbBlockDeviceDict(
@@ -373,10 +375,12 @@ class MetadataTestCase(test.TestCase):
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
- self.assertEqual(base._format_instance_mapping(self.context,
- instance_ref0), block_device._DEFAULT_MAPPINGS)
- self.assertEqual(base._format_instance_mapping(self.context,
- instance_ref1), expected)
+ self.assertEqual(
+ base._format_instance_mapping(instance_ref0),
+ block_device._DEFAULT_MAPPINGS)
+ self.assertEqual(
+ base._format_instance_mapping(instance_ref1),
+ expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self, self.instance.obj_clone())
diff --git a/nova/tests/unit/virt/libvirt/fake_imagebackend.py b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
index d73a396ab5..4a940fa45e 100644
--- a/nova/tests/unit/virt/libvirt/fake_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
@@ -184,11 +184,17 @@ class ImageBackendFixture(fixtures.Fixture):
# class.
image_init.SUPPORTS_CLONE = False
- # Ditto for the 'is_shared_block_storage' function
+ # Ditto for the 'is_shared_block_storage' function and
+ # 'is_file_in_instance_path'
def is_shared_block_storage():
return False
+ def is_file_in_instance_path():
+ return False
+
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
+ setattr(image_init, 'is_file_in_instance_path',
+ is_file_in_instance_path)
return image_init
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
index 287a00efa2..676a3542b1 100644
--- a/nova/tests/unit/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -770,6 +770,7 @@ class Domain(object):
self._has_saved_state = False
self._snapshots = {}
self._id = self._connection._id_counter
+ self._job_type = VIR_DOMAIN_JOB_UNBOUNDED
def _parse_definition(self, xml):
try:
@@ -1237,7 +1238,17 @@ class Domain(object):
return [0] * 12
def jobStats(self, flags=0):
- return {}
+ # NOTE(artom) By returning VIR_DOMAIN_JOB_UNBOUNDED, we're pretending a
+ # job is constantly running. Tests are expected to call the
+ # complete_job or fail_job methods when they're ready for jobs (read:
+ # live migrations) to "complete".
+ return {'type': self._job_type}
+
+ def complete_job(self):
+ self._job_type = VIR_DOMAIN_JOB_COMPLETED
+
+ def fail_job(self):
+ self._job_type = VIR_DOMAIN_JOB_FAILED
def injectNMI(self, flags=0):
return 0
@@ -1702,6 +1713,16 @@ virSecret = Secret
virNWFilter = NWFilter
+# A private libvirt-python class and global only provided here for testing to
+# ensure it's not returned by libvirt.host.Host.get_libvirt_proxy_classes.
+class FakeHandler(object):
+ def __init__(self):
+ pass
+
+
+_EventAddHandleFunc = FakeHandler
+
+
class FakeLibvirtFixture(fixtures.Fixture):
"""Performs global setup/stubbing for all libvirt tests.
"""
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 3b131f4da7..93b4158304 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -4447,6 +4447,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_get_guest_config_windows_hyperv_all_hide_flv(self):
# Similar to test_get_guest_config_windows_hyperv_feature2
# but also test hiding the HyperV signature with the flavor
+ # extra_spec "hw:hide_hypervisor_id"
+ flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
+ extra_specs={"hw:hide_hypervisor_id": "true"},
+ expected_attrs={"extra_specs"})
+ # this works for kvm (the default, tested below) and qemu
+ self.flags(virt_type='qemu', group='libvirt')
+
+ self._test_get_guest_config_windows_hyperv(
+ flavor=flavor_hide_id, hvid_hidden=True)
+
+ def test_get_guest_config_windows_hyperv_all_hide_flv_old(self):
+ # Similar to test_get_guest_config_windows_hyperv_feature2
+ # but also test hiding the HyperV signature with the flavor
# extra_spec "hide_hypervisor_id"
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
extra_specs={"hide_hypervisor_id": "true"},
@@ -4471,10 +4484,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_get_guest_config_windows_hyperv_all_hide_flv_img(self):
# Similar to test_get_guest_config_windows_hyperv_feature2
# but also test hiding the HyperV signature with both the flavor
- # extra_spec "hide_hypervisor_id" and the image property
+ # extra_spec "hw:hide_hypervisor_id" and the image property
# "img_hide_hypervisor_id"
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "true"},
+ extra_specs={"hw:hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
self.flags(virt_type='qemu', group='libvirt')
@@ -6087,7 +6100,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[4].type, "spice")
self.assertEqual(cfg.devices[5].type, "qxl")
- self.assertEqual(cfg.devices[5].vram, 64 * units.Mi / units.Ki)
+ self.assertEqual(cfg.devices[5].vram, 65536)
def _test_add_video_driver(self, model):
self.flags(virt_type='kvm', group='libvirt')
@@ -6098,15 +6111,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
guest = vconfig.LibvirtConfigGuest()
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
+ flavor = objects.Flavor(
+ extra_specs={'hw_video:ram_max_mb': '512'})
image_meta = objects.ImageMeta.from_dict({
- 'properties': {'hw_video_model': model}})
+ 'properties': {
+ 'hw_video_model': model,
+ 'hw_video_ram': 8,
+ },
+ })
self.assertTrue(drvr._guest_add_video_device(guest))
- video = drvr._add_video_driver(guest, image_meta,
- flavor)
+ video = drvr._add_video_driver(guest, image_meta, flavor)
self.assertEqual(model, video.type)
+ self.assertEqual(8192, video.vram) # should be in KiB
def test__add_video_driver(self):
self._test_add_video_driver('qxl')
@@ -6518,7 +6535,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self):
# Input to the test: flavor extra_specs
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "true"},
+ extra_specs={"hw:hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
self.flags(virt_type='kvm', group='libvirt')
@@ -6544,7 +6561,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# Input to the test: image metadata (true) and flavor
# extra_specs (true)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "true"},
+ extra_specs={"hw:hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
@@ -6571,7 +6588,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# Input to the test: image metadata (false) and flavor
# extra_specs (true)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "true"},
+ extra_specs={"hw:hide_hypervisor_id": "true"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
@@ -6596,7 +6613,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# Input to the test: image metadata (true) and flavor
# extra_specs (false)
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "false"},
+ extra_specs={"hw:hide_hypervisor_id": "false"},
expected_attrs={"extra_specs"})
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
@@ -6643,7 +6660,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_get_guest_config_without_hiding_hypervisor_id_flavor_extra_specs(
self):
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
- extra_specs={"hide_hypervisor_id": "false"},
+ extra_specs={"hw:hide_hypervisor_id": "false"},
expected_attrs={"extra_specs"})
self.flags(virt_type='qemu', group='libvirt')
@@ -21781,6 +21798,74 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_rbd_driver.flatten.assert_called_once_with(
mock.sentinel.rbd_name, pool=mock.sentinel.rbd_pool)
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_try_fetch_image_cache')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_rebase_with_qemu_img')
+ def _test_unshelve_qcow2_rebase_image_during_create(self,
+ mock_rebase, mock_fetch, original_image_in_glance=True):
+ self.flags(images_type='qcow2', group='libvirt')
+
+ # Original image ref the instance was created from, before SHELVE
+ # occurs; base_root_fname is the related backing file name.
+ base_image_ref = 'base_image_ref'
+ base_root_fname = imagecache.get_cache_fname(base_image_ref)
+ # Snapshot image ref created during SHELVE.
+ shelved_image_ref = 'shelved_image_ref'
+ shelved_root_fname = imagecache.get_cache_fname(shelved_image_ref)
+
+ # Instance state during unshelve spawn().
+ inst_params = {
+ 'image_ref': shelved_image_ref,
+ 'vm_state': vm_states.SHELVED_OFFLOADED,
+ 'system_metadata': {'image_base_image_ref': base_image_ref}
+ }
+
+ instance = self._create_instance(params=inst_params)
+ disk_images = {'image_id': instance.image_ref}
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ disk_path = os.path.join(instance_dir, 'disk')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ if original_image_in_glance:
+ # We expect the final backing file to be the original image, not the shelved one.
+ expected_backing_file = os.path.join(
+ imagecache.ImageCacheManager().cache_dir,
+ base_root_fname)
+ else:
+ # None means rebase will merge the backing file into the disk (flatten).
+ expected_backing_file = None
+ mock_fetch.side_effect = [
+ None,
+ exception.ImageNotFound(image_id=base_image_ref)
+ ]
+
+ drvr._create_and_inject_local_root(
+ self.context, instance, False, '', disk_images, None, None)
+
+ mock_fetch.assert_has_calls([
+ mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
+ libvirt_utils.fetch_image,
+ self.context, shelved_root_fname, shelved_image_ref,
+ instance, instance.root_gb * units.Gi, None),
+ mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
+ libvirt_utils.fetch_image,
+ self.context, base_root_fname, base_image_ref,
+ instance, None)])
+ mock_rebase.assert_called_once_with(disk_path, expected_backing_file)
+
+ def test_unshelve_qcow2_rebase_image_during_create(self):
+ # The original image is present in Glance. In that case the 2nd
+ # fetch succeeds and we rebase the instance disk onto the original
+ # image backing file, so the instance is back to its nominal state:
+ # after unshelve, instance.image_ref will match the current backing
+ # file.
+ self._test_unshelve_qcow2_rebase_image_during_create()
+
+ def test_unshelve_qcow2_rebase_image_during_create_notfound(self):
+ # The original image is no longer available in Glance, so the 2nd
+ # fetch will fail (HTTP 404). In that case qemu-img rebase will merge
+ # the backing file into the disk, removing the backing file dependency.
+ self._test_unshelve_qcow2_rebase_image_during_create(
+ original_image_in_glance=False)
+
@mock.patch('nova.virt.libvirt.driver.imagebackend')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._inject_data')
@mock.patch('nova.virt.libvirt.driver.imagecache')
@@ -21799,6 +21884,52 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
None)
self.assertFalse(mock_inject.called)
+ @mock.patch('nova.virt.libvirt.utils.fetch_image')
+ @mock.patch('nova.virt.libvirt.storage.rbd_utils.RBDDriver')
+ @mock.patch.object(imagebackend, 'IMAGE_API')
+ def test_create_fetch_image_ceph_workaround(self, mock_image, mock_rbd,
+ mock_fetch):
+ # Make sure that the rbd clone will fail (image is not clone-able)
+ mock_rbd.is_cloneable.return_value = False
+ # Make sure the rbd code thinks the image does not already exist
+ mock_rbd.return_value.exists.return_value = False
+ # Make sure the rbd code says the image is small
+ mock_rbd.return_value.size.return_value = 128 * units.Mi
+ # Make sure IMAGE_API.get() returns a raw image
+ mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
+
+ instance = self._create_instance()
+ disk_images = {'image_id': 'foo'}
+ self.flags(images_type='rbd', group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ def do_create():
+ # Reset the fetch mock and run our driver method so we can
+ # check for called-ness after each attempt
+ mock_fetch.reset_mock()
+ drvr._create_and_inject_local_root(self.context,
+ instance,
+ False,
+ '',
+ disk_images,
+ get_injection_info(),
+ None)
+
+ # Do an image create with rbd
+ do_create()
+ # Make sure it tried fetch, which implies that it tried and
+ # failed to clone.
+ mock_fetch.assert_called()
+
+ # Enable the workaround
+ self.flags(never_download_image_if_on_rbd=True,
+ group='workarounds')
+ # Ensure that we raise the original ImageUnacceptable from the
+ # failed clone...
+ self.assertRaises(exception.ImageUnacceptable, do_create)
+ # ...and ensure that we did _not_ try to fetch
+ mock_fetch.assert_not_called()
+
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
@@ -23841,6 +23972,25 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
</cpu>
'''], 1)
+ @mock.patch('nova.virt.images.qemu_img_info',
+ return_value=mock.Mock(file_format="fake_fmt"))
+ @mock.patch('oslo_concurrency.processutils.execute')
+ def test_rebase_with_qemu_img(self, mock_execute, mock_qemu_img_info):
+ """rebasing disk image to another backing file"""
+ self.drvr._rebase_with_qemu_img("disk", "backing_file")
+ mock_qemu_img_info.assert_called_once_with("backing_file")
+ mock_execute.assert_called_once_with('qemu-img', 'rebase',
+ '-b', 'backing_file', '-F',
+ 'fake_fmt', 'disk')
+
+ # Flatten disk image when no backing file is given.
+ mock_qemu_img_info.reset_mock()
+ mock_execute.reset_mock()
+ self.drvr._rebase_with_qemu_img("disk", None)
+ self.assertEqual(0, mock_qemu_img_info.call_count)
+ mock_execute.assert_called_once_with('qemu-img', 'rebase',
+ '-b', '', 'disk')
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -24460,8 +24610,23 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
not running should trigger a blockRebase using qemu-img not libvirt.
In this test, we rebase the image with another image as backing file.
"""
+ dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='/var/lib/nova/instances/%s/disk1_file'/>
+ <target dev='vda' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio' serial='1234'/>
+ </disk>
+ </devices>
+ </domain>""" % self.inst['uuid']
+
mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks(
- self.dom_xml)
+ dom_xml)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
@@ -24472,10 +24637,13 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
self.delete_info_1)
mock_disk_op_sema.__enter__.assert_called_once()
- mock_qemu_img_info.assert_called_once_with("snap.img")
- mock_execute.assert_called_once_with('qemu-img', 'rebase',
- '-b', 'snap.img', '-F',
- 'fake_fmt', 'disk1_file')
+ mock_qemu_img_info.assert_called_once_with(
+ "/var/lib/nova/instances/%s/snap.img" % instance.uuid)
+ mock_execute.assert_called_once_with(
+ 'qemu-img', 'rebase',
+ '-b', '/var/lib/nova/instances/%s/snap.img' % instance.uuid,
+ '-F', 'fake_fmt',
+ '/var/lib/nova/instances/%s/disk1_file' % instance.uuid)
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch.object(host.Host, "has_min_version",
@@ -25471,6 +25639,15 @@ class LibvirtPMEMNamespaceTests(test.NoDBTestCase):
self.assertRaises(exception.PMEMNamespaceConfigInvalid,
drvr._discover_vpmems, vpmem_conf)
+ @mock.patch('nova.privsep.libvirt.get_pmem_namespaces')
+ def test_get_vpmems_on_host__exception(self, mock_get_ns):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_get_ns.side_effect = Exception('foo')
+
+ self.assertRaises(
+ exception.GetPMEMNamespacesFailed,
+ drvr._get_vpmems_on_host)
+
@mock.patch('nova.virt.hardware.get_vpmems')
def test_get_ordered_vpmems(self, mock_labels):
# get orgered vpmems based on flavor extra_specs
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index c9411d6eb7..4c4b1d45c8 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1322,8 +1322,9 @@ class LibvirtTpoolProxyTestCase(test.NoDBTestCase):
self.assertIn(fakelibvirt.virSecret, proxy_classes)
self.assertIn(fakelibvirt.virNWFilter, proxy_classes)
- # Assert that we filtered out libvirtError
+ # Assert that we filtered out libvirtError and any private classes
self.assertNotIn(fakelibvirt.libvirtError, proxy_classes)
+ self.assertNotIn(fakelibvirt._EventAddHandleFunc, proxy_classes)
def test_tpool_get_connection(self):
# Test that Host.get_connection() returns a tpool.Proxy
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index 7adfb0ef65..07d11f94e5 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -955,7 +955,48 @@ class UtilityMigrationTestCase(test.NoDBTestCase):
doc = etree.fromstring(original_xml)
ex = self.assertRaises(KeyError, migration._update_vif_xml,
doc, data, get_vif_config)
- self.assertIn("CA:FE:DE:AD:BE:EF", six.text_type(ex))
+ self.assertIn("ca:fe:de:ad:be:ef", six.text_type(ex))
+
+ def test_update_vif_xml_lower_case_mac(self):
+ """Tests that the vif in the migrate data is not found in the existing
+ guest interfaces.
+ """
+ conf = vconfig.LibvirtConfigGuestInterface()
+ conf.net_type = "bridge"
+ conf.source_dev = "qbra188171c-ea"
+ conf.target_dev = "tapa188171c-ea"
+ conf.mac_addr = "DE:AD:BE:EF:CA:FE"
+ conf.model = "virtio"
+ original_xml = """<domain>
+ <uuid>3de6550a-8596-4937-8046-9d862036bca5</uuid>
+ <devices>
+ <interface type="bridge">
+ <mac address="de:ad:be:ef:ca:fe"/>
+ <model type="virtio"/>
+ <source bridge="qbra188171c-ea"/>
+ <target dev="tapa188171c-ea"/>
+ <virtualport type="openvswitch">
+ <parameters interfaceid="%s"/>
+ </virtualport>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
+ function='0x0'/>
+ </interface>
+ </devices>
+ </domain>""" % uuids.ovs
+ expected_xml = """<domain>
+ <uuid>3de6550a-8596-4937-8046-9d862036bca5</uuid>
+ <devices>
+ <interface type="bridge">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source bridge="qbra188171c-ea"/>
+ <target dev="tapa188171c-ea"/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
+ function='0x0'/>
+ </interface>
+ </devices>
+ </domain>"""
+ self._test_update_vif_xml(conf, original_xml, expected_xml)
class MigrationMonitorTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index c457b7b89b..ff69b3f9c9 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -39,6 +39,7 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import utils as test_utils
+from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt import block_device as driver_block_device
from nova.virt import event as virtevent
from nova.virt import fake
@@ -617,6 +618,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
network_info = test_utils.get_test_network_info()
self.connection.unfilter_instance(instance_ref, network_info)
+ @mock.patch(
+ 'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.jobStats',
+ new=mock.Mock(return_value={
+ 'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED}))
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
fake_context = context.RequestContext('fake', 'fake')
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index f4df91e494..da98829453 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -2861,32 +2861,15 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
@staticmethod
- def _rebase_with_qemu_img(guest, device, active_disk_object,
- rebase_base):
- """Rebase a device tied to a guest using qemu-img.
-
- :param guest:the Guest which owns the device being rebased
- :type guest: nova.virt.libvirt.guest.Guest
- :param device: the guest block device to rebase
- :type device: nova.virt.libvirt.guest.BlockDevice
- :param active_disk_object: the guest block device to rebase
- :type active_disk_object: nova.virt.libvirt.config.\
- LibvirtConfigGuestDisk
+ def _rebase_with_qemu_img(source_path, rebase_base):
+ """Rebase a disk using qemu-img.
+
+ :param source_path: the disk source path to rebase
+ :type source_path: string
:param rebase_base: the new parent in the backing chain
:type rebase_base: None or string
"""
- # It's unsure how well qemu-img handles network disks for
- # every protocol. So let's be safe.
- active_protocol = active_disk_object.source_protocol
- if active_protocol is not None:
- msg = _("Something went wrong when deleting a volume snapshot: "
- "rebasing a %(protocol)s network disk using qemu-img "
- "has not been fully tested") % {'protocol':
- active_protocol}
- LOG.error(msg)
- raise exception.InternalError(msg)
-
if rebase_base is None:
# If backing_file is specified as "" (the empty string), then
# the image is rebased onto no backing file (i.e. it will exist
@@ -2897,11 +2880,20 @@ class LibvirtDriver(driver.ComputeDriver):
# If the rebased image is going to have a backing file then
# explicitly set the backing file format to avoid any security
# concerns related to file format auto detection.
- backing_file = rebase_base
+ if os.path.isabs(rebase_base):
+ backing_file = rebase_base
+ else:
+ # This is probably a volume snapshot case where the
+ # rebase_base is relative. See bug
+ # https://bugs.launchpad.net/nova/+bug/1885528
+ backing_file_name = os.path.basename(rebase_base)
+ volume_path = os.path.dirname(source_path)
+ backing_file = os.path.join(volume_path, backing_file_name)
+
b_file_fmt = images.qemu_img_info(backing_file).file_format
qemu_img_extra_arg = ['-F', b_file_fmt]
- qemu_img_extra_arg.append(active_disk_object.source_path)
+ qemu_img_extra_arg.append(source_path)
# execute operation with disk concurrency semaphore
with compute_utils.disk_ops_semaphore:
processutils.execute("qemu-img", "rebase", "-b", backing_file,
@@ -3064,7 +3056,18 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.debug('Guest is not running so doing a block rebase '
'using "qemu-img rebase"', instance=instance)
- self._rebase_with_qemu_img(guest, dev, active_disk_object,
+
+ # It's unsure how well qemu-img handles network disks for
+ # every protocol. So let's be safe.
+ active_protocol = active_disk_object.source_protocol
+ if active_protocol is not None:
+ msg = _("Something went wrong when deleting a volume "
+ "snapshot: rebasing a %(protocol)s network disk "
+ "using qemu-img has not been fully tested"
+ ) % {'protocol': active_protocol}
+ LOG.error(msg)
+ raise exception.InternalError(msg)
+ self._rebase_with_qemu_img(active_disk_object.source_path,
rebase_base)
else:
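The new relative-path handling in _rebase_with_qemu_img can be summarized with a small standalone sketch (the paths below are hypothetical, not taken from the change):

    import os

    def resolve_backing_file(source_path, rebase_base):
        # Absolute new parent: use it verbatim.
        if os.path.isabs(rebase_base):
            return rebase_base
        # Relative new parent (volume snapshot case, bug 1885528):
        # resolve it next to the disk being rebased.
        return os.path.join(os.path.dirname(source_path),
                            os.path.basename(rebase_base))

    # resolve_backing_file('/var/lib/nova/mnt/abc/volume-1', 'snap.img')
    # -> '/var/lib/nova/mnt/abc/snap.img'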
@@ -4021,9 +4024,24 @@ class LibvirtDriver(driver.ComputeDriver):
backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
+ refuse_fetch = (
+ CONF.libvirt.images_type == 'rbd' and
+ CONF.workarounds.never_download_image_if_on_rbd)
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
+ if refuse_fetch:
+ # Re-raise the exception from the failed
+ # ceph clone. The compute manager expects
+ # ImageUnacceptable as a possible result
+ # of spawn(), from which this is called.
+ with excutils.save_and_reraise_exception():
+ LOG.warning(
+ 'Image %s is not on my ceph and '
+ '[workarounds]/'
+ 'never_download_image_if_on_rbd=True;'
+ ' refusing to fetch and upload.',
+ disk_images['image_id'])
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
@@ -4032,6 +4050,14 @@ class LibvirtDriver(driver.ComputeDriver):
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
+ # During unshelve on the Qcow2 backend, we spawn() using the snapshot
+ # image created during shelve. Extra work is needed in order to rebase
+ # the disk image onto its original image_ref, so that the disk backing
+ # file once again represents image_ref instead of the shelved image.
+ if (instance.vm_state == vm_states.SHELVED_OFFLOADED and
+ isinstance(backend, imagebackend.Qcow2)):
+ self._finalize_unshelve_qcow2_image(context, instance, backend)
+
if need_inject:
self._inject_data(backend, instance, injection_info)
@@ -4041,6 +4067,36 @@ class LibvirtDriver(driver.ComputeDriver):
return created_disks
+ def _finalize_unshelve_qcow2_image(self, context, instance, backend):
+ # NOTE(aarents): During qcow2 instance unshelve, the backing file
+ # represents the shelved image, not the original instance.image_ref.
+ # Here we rebase the instance disk onto the original image.
+ # This second fetch call does nothing except download the original
+ # backing file if it is missing, as the image disk has already been
+ # created/resized by the first fetch call.
+ base_dir = self.image_cache_manager.cache_dir
+ base_image_ref = instance.system_metadata.get('image_base_image_ref')
+ root_fname = imagecache.get_cache_fname(base_image_ref)
+ base_backing_fname = os.path.join(base_dir, root_fname)
+
+ try:
+ self._try_fetch_image_cache(backend, libvirt_utils.fetch_image,
+ context, root_fname, base_image_ref,
+ instance, None)
+ except exception.ImageNotFound:
+ # We must flatten here in order to remove the dependency on an
+ # orphaned backing file (the shelved image will be dropped once
+ # unshelve is successful).
+ LOG.warning('Current disk image is created on top of shelved '
+ 'image and cannot be rebased to original image '
+ 'because it is no longer available in the image '
+ 'service; the disk will consequently be flattened.',
+ instance=instance)
+ base_backing_fname = None
+
+ LOG.info('Rebasing disk image.', instance=instance)
+ self._rebase_with_qemu_img(backend.path, base_backing_fname)
+
def _create_configdrive(self, context, instance, injection_info,
rescue=False):
# As this method being called right after the definition of a
@@ -5126,7 +5182,9 @@ class LibvirtDriver(driver.ComputeDriver):
flavor):
hide_hypervisor_id = (strutils.bool_from_string(
flavor.extra_specs.get('hide_hypervisor_id')) or
- image_meta.properties.get('img_hide_hypervisor_id'))
+ strutils.bool_from_string(
+ flavor.extra_specs.get('hw:hide_hypervisor_id')) or
+ image_meta.properties.get('img_hide_hypervisor_id'))
if virt_type == "xen":
# PAE only makes sense in X86
@@ -5234,7 +5292,7 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
- video.vram = video_ram * units.Mi / units.Ki
+ video.vram = video_ram * units.Mi // units.Ki
guest.add_device(video)
# NOTE(sean-k-mooney): return the video device we added
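As a concrete check of the integer-division change above: with the default 64 MiB of video RAM, 64 * units.Mi // units.Ki evaluates to 65536 (KiB), the literal now asserted in test_driver.py, whereas under Python 3 the previous true division would have produced the float 65536.0; likewise hw_video_ram=8 yields the 8192 asserted in test__add_video_driver.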
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 318294c659..a7fbc50ef2 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -367,8 +367,8 @@ class Guest(object):
return devs
def detach_device_with_retry(self, get_device_conf_func, device, live,
- max_retry_count=7, inc_sleep_time=2,
- max_sleep_time=30,
+ max_retry_count=7, inc_sleep_time=10,
+ max_sleep_time=60,
alternative_device_name=None,
supports_device_missing_error_code=False):
"""Detaches a device from the guest. After the initial detach request,
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 603ab9925f..d8078dd387 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -124,15 +124,15 @@ class Host(object):
@staticmethod
def _get_libvirt_proxy_classes(libvirt_module):
"""Return a tuple for tpool.Proxy's autowrap argument containing all
- classes defined by the libvirt module except libvirtError.
+ public vir* classes defined by the libvirt module.
"""
# Get a list of (name, class) tuples of libvirt classes
classes = inspect.getmembers(libvirt_module, inspect.isclass)
- # Return a list of just the classes, filtering out libvirtError because
- # we don't need to proxy that
- return tuple([cls[1] for cls in classes if cls[0] != 'libvirtError'])
+ # Return a list of just the vir* classes, filtering out libvirtError
+ # and any private globals pointing at private internal classes.
+ return tuple([cls[1] for cls in classes if cls[0].startswith("vir")])
def _wrap_libvirt_proxy(self, obj):
"""Return an object wrapped in a tpool.Proxy using autowrap appropriate
diff --git a/nova/virt/libvirt/migration.py b/nova/virt/libvirt/migration.py
index 7b7214712c..36a9d4bcd9 100644
--- a/nova/virt/libvirt/migration.py
+++ b/nova/virt/libvirt/migration.py
@@ -346,14 +346,21 @@ def _update_vif_xml(xml_doc, migrate_data, get_vif_config):
instance_uuid = xml_doc.findtext('uuid')
parser = etree.XMLParser(remove_blank_text=True)
interface_nodes = xml_doc.findall('./devices/interface')
- migrate_vif_by_mac = {vif.source_vif['address']: vif
+ # The MAC address stored for a port in the Neutron DB and the one in
+ # the domain XML might differ in case, so to harmonize them we convert
+ # the MAC to lower case for the dict key.
+ migrate_vif_by_mac = {vif.source_vif['address'].lower(): vif
for vif in migrate_data.vifs}
for interface_dev in interface_nodes:
mac = interface_dev.find('mac')
mac = mac if mac is not None else {}
mac_addr = mac.get('address')
if mac_addr:
- migrate_vif = migrate_vif_by_mac[mac_addr]
+ # The MAC address stored by libvirt should always be normalized
+ # to lower case. But just to be extra safe here we still normalize
+ # the MAC retrieved from the XML to be absolutely sure it will be
+ # the same as the Neutron-provided one.
+ migrate_vif = migrate_vif_by_mac[mac_addr.lower()]
vif = migrate_vif.get_dest_vif()
# get_vif_config is a partial function of
# nova.virt.libvirt.vif.LibvirtGenericVIFDriver.get_config
diff --git a/releasenotes/notes/avoid_muli_ceph_download-4083decf501dba40.yaml b/releasenotes/notes/avoid_muli_ceph_download-4083decf501dba40.yaml
new file mode 100644
index 0000000000..f79c278119
--- /dev/null
+++ b/releasenotes/notes/avoid_muli_ceph_download-4083decf501dba40.yaml
@@ -0,0 +1,19 @@
+---
+other:
+ - |
+ Nova now has a config option called
+ ``[workarounds]/never_download_image_if_on_rbd`` which helps to
+ avoid pathological storage behavior with multiple ceph clusters.
+ Currently, Nova does *not* support multiple ceph clusters
+ properly, but Glance can be configured with them. If an instance
+ is booted from an image residing in a ceph cluster other than the
+ one Nova knows about, it will silently download it from Glance and
+ re-upload the image to the local ceph privately for that
+ instance. Unlike the behavior you expect when configuring Nova and
+ Glance for ceph, Nova will continue to do this over and over for
+ the same image when subsequent instances are booted, consuming a
+ large amount of storage unexpectedly. The new workaround option
+ will cause Nova to refuse to do this download/upload behavior and
+ instead fail the instance boot. It is simply a stop-gap effort to
+ keep unsupported deployments with multiple ceph clusters from
+ silently consuming large amounts of disk space.
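As an illustration only, a deployment opting in to this workaround would set the option in nova.conf on the relevant compute nodes:

    [workarounds]
    never_download_image_if_on_rbd = True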
diff --git a/releasenotes/notes/bug-1821755-7bd03319e34b6b10.yaml b/releasenotes/notes/bug-1821755-7bd03319e34b6b10.yaml
new file mode 100644
index 0000000000..4c6135311b
--- /dev/null
+++ b/releasenotes/notes/bug-1821755-7bd03319e34b6b10.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Improved detection of anti-affinity policy violations when performing live
+ and cold migrations. Most of the violations caused by race conditions due
+ to performing concurrent live or cold migrations should now be addressed
+ by extra checks in the compute service. Upon detection, cold migration
+ operations are automatically rescheduled. Live migrations have two
+ checks: a violation caught by the first check causes the migration to
+ be rescheduled, while one caught by the second makes the live migration
+ fail cleanly and revert the instance state back to its previous value.
diff --git a/releasenotes/notes/bug-1841932-c871ac7b3b05d67e.yaml b/releasenotes/notes/bug-1841932-c871ac7b3b05d67e.yaml
new file mode 100644
index 0000000000..d54be4f03a
--- /dev/null
+++ b/releasenotes/notes/bug-1841932-c871ac7b3b05d67e.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Add support for the ``hw:hide_hypervisor_id`` extra spec. This is an
+ alias for the ``hide_hypervisor_id`` extra spec, which was not
+ compatible with the ``AggregateInstanceExtraSpecsFilter`` scheduler
+ filter. See
+ `bug 1841932 <https://bugs.launchpad.net/nova/+bug/1841932>`_ for more
+ details.
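For example (the flavor name here is illustrative), the new alias can be applied through the usual extra-spec workflow:

    openstack flavor set my-flavor --property hw:hide_hypervisor_id=true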
diff --git a/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml b/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml
new file mode 100644
index 0000000000..ce05b9a867
--- /dev/null
+++ b/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml
@@ -0,0 +1,19 @@
+---
+security:
+ - |
+ A vulnerability in the console proxies (novnc, serial, spice) that allowed
+ open redirection has been `patched`_. The novnc, serial, and spice console
+ proxies are implemented as websockify servers and the request handler
+ inherits from the python standard SimpleHTTPRequestHandler. There is a
+ `known issue`_ in the SimpleHTTPRequestHandler which allows open redirects
+ by way of URLs in the following format::
+
+ http://vncproxy.my.domain.com//example.com/%2F..
+
+ which, if visited, will redirect the user to example.com.
+
+ The novnc, serial, and spice console proxies will now reject requests that
+ pass a redirection URL beginning with "//" with a 400 Bad Request.
+
+ .. _patched: https://bugs.launchpad.net/nova/+bug/1927677
+ .. _known issue: https://bugs.python.org/issue32084
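The unit tests added in nova/tests/unit/console/test_websocketproxy.py above exercise this behavior. As a rough sketch only (not a verbatim copy of the nova patch in nova/console/websocketproxy.py), the guard amounts to refusing to build a redirect from a protocol-relative path:

    def reject_open_redirect(path):
        # Browsers interpret 'Location: //host/...' as an absolute URI
        # ('http://host/...'), so never turn such a path into a redirect.
        if path.startswith('//'):
            return (400, 'URI must not start with //')
        return None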
diff --git a/releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml b/releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml
new file mode 100644
index 0000000000..dc33e3c61d
--- /dev/null
+++ b/releasenotes/notes/restore-rocky-portbinding-semantics-48e9b1fa969cc5e9.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ In the Rocky (18.0.0) release support was added to nova to use neutron's
+ multiple port binding feature when the binding-extended API extension
+ is available. In the Train (20.0.0) release the SR-IOV live migration
+ feature broke the semantics of the ``vifs`` field in the
+ ``migration_data`` object, which signals whether the new multiple port
+ binding workflow should be used, by always populating it even when the
+ ``binding-extended`` API extension is not present. This broke live
+ migration for any deployment that did not support the optional
+ ``binding-extended`` API extension. The Rocky behavior has now been
+ restored, enabling live migration using the single port binding
+ workflow when multiple port bindings are not available.
diff --git a/tox.ini b/tox.ini
index 209875c44f..150d98d0d1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,10 +1,14 @@
[tox]
-minversion = 3.1.1
+minversion = 3.2.0
envlist = py27,py37,functional,pep8
# Automatic envs (pyXX) will only use the python version appropriate to that
# env and ignore basepython inherited from [testenv] if we set
# ignore_basepython_conflict.
ignore_basepython_conflict = True
+# Pin the virtualenv and therefore the setuptools version used for the env
+# creation. This results in a new tox being installed in .tox/.tox virtualenv
+# and the tox on the host will delegate all the calls to the tox in that env.
+requires = virtualenv<20.8
[testenv]
basepython = python3
@@ -57,7 +61,6 @@ commands =
bash -c "! find doc/ -type f -name *.json | xargs grep -U -n $'\r'"
# Check that all included JSON files are valid JSON
bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python'
- bash tools/check-cherry-picks.sh
[testenv:fast8]
description =
@@ -66,6 +69,15 @@ envdir = {toxworkdir}/shared
commands =
bash tools/flake8wrap.sh -HEAD
+[testenv:validate-backport]
+description =
+ Determine whether a backport is ready to be merged by checking whether it has
+ already been merged to master or more recent stable branches.
+deps =
+skipsdist = true
+commands =
+ bash tools/check-cherry-picks.sh
+
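Locally, the check can be run the same way the gate job does, e.g.:

    tox -e validate-backport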
[testenv:functional]
# TODO(melwitt): This can be removed when functional tests are gating with
# python 3.x
@@ -119,6 +131,16 @@ deps = {[testenv:functional]deps}
commands =
{[testenv:functional]commands}
+[testenv:functional-without-sample-db-tests]
+description =
+ Run functional tests by excluding the API|Notification
+ sample tests and DB tests. This env is used in the
+ placement-nova-tox-functional-py38 job, which is defined and
+ run in placement.
+deps = {[testenv:functional]deps}
+commands =
+ stestr --test-path=./nova/tests/functional run --black-regex '((?:api|notification)_sample_tests|functional\.db\.)' {posargs}
+
[testenv:api-samples]
setenv =
{[testenv]setenv}
@@ -280,16 +302,3 @@ usedevelop = False
deps = bindep
commands =
bindep test
-
-[testenv:lower-constraints]
-# We need our own install command to avoid upper constraints being considered
-# when making the environment. Set usedevelop to false to avoid pbr installing
-# requirements for us so all requirements are installed in one call to pip.
-usedevelop = False
-install_command = pip install {opts} {packages}
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
-commands =
- stestr run {posargs}