-rw-r--r--  .zuul.yaml                                               |  341
-rw-r--r--  doc/source/user/feature-matrix-gp.ini                    |   24
-rw-r--r--  nova/cmd/__init__.py                                     |    8
-rw-r--r--  nova/compute/manager.py                                  |   22
-rw-r--r--  nova/compute/resource_tracker.py                         |   35
-rw-r--r--  nova/db/sqlalchemy/api.py                                |    8
-rw-r--r--  nova/objects/instance.py                                 |    3
-rw-r--r--  nova/objects/request_spec.py                             |   13
-rw-r--r--  nova/scheduler/host_manager.py                           |    6
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py  |   95
-rw-r--r--  nova/tests/functional/db/test_instance.py                |   11
-rw-r--r--  nova/tests/unit/db/test_db_api.py                        |   18
-rw-r--r--  nova/tests/unit/objects/test_request_spec.py             |   10
-rw-r--r--  nova/tests/unit/scheduler/test_host_manager.py           |   11
-rw-r--r--  nova/tests/unit/virt/libvirt/fakelibvirt.py              |    3
-rwxr-xr-x  nova/tests/unit/virt/libvirt/test_driver.py              |  213
-rw-r--r--  nova/tests/unit/virt/libvirt/test_guest.py               |    4
-rw-r--r--  nova/tests/unit/virt/test_block_device.py                |    2
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py                |    2
-rw-r--r--  nova/virt/block_device.py                                |    1
-rw-r--r--  nova/virt/libvirt/driver.py                              |   80
-rwxr-xr-x  nova/virt/libvirt/guest.py                               |    7
-rw-r--r--  releasenotes/notes/bug-1414559-880d6b3c1ce3b95e.yaml     |    8
23 files changed, 603 insertions(+), 322 deletions(-)
diff --git a/.zuul.yaml b/.zuul.yaml
index 9a509e0807..c8cf782611 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -69,14 +69,353 @@
post-run: playbooks/legacy/nova-live-migration/post.yaml
- project:
- name: openstack/nova
+ templates:
+ - openstack-python-jobs
+ - openstack-python35-jobs
+ - publish-openstack-sphinx-docs
+ - periodic-stable-jobs
+ - check-requirements
+ - integrated-gate
+ - integrated-gate-py35
+ - release-notes-jobs
check:
jobs:
- nova-live-migration
- nova-tox-functional
- nova-tox-functional-py35
+ - neutron-grenade-multinode:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-grenade-dsvm-neutron-multinode-live-migration:
+ voting: false
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^api-.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*\.py$
+ - ^nova/tests/functional/.*$
+ - ^nova/tests/unit/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-full-devstack-plugin-ceph:
+ voting: false
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - neutron-tempest-linuxbridge:
+ irrelevant-files:
+ - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - tempest-multinode-full:
+ voting: false
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - tempest-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - neutron-grenade:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
gate:
jobs:
- nova-live-migration
- nova-tox-functional
- nova-tox-functional-py35
+ - tempest-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - neutron-grenade:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ post:
+ jobs:
+ - openstack-tox-cover
+ experimental:
+ jobs:
+ - legacy-tempest-dsvm-nova-v20-api:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-multinode-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-neutron-dvr-multinode-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - neutron-tempest-dvr-ha-multinode-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-neutron-src-oslo.versionedobjects:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-nova-libvirt-kvm-apr:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-grenade-dsvm-neutron-multinode-zero-downtime:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ironic-tempest-dsvm-ipa-wholedisk-agent_ipmitool-tinyipa-multinode:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^.git.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^tests-py3.txt$
+ - ironic-tempest-dsvm-bfv:
+ # NOTE: Ironic boot from volume only works starting in stable/pike.
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^.git.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^tests-py3.txt$
+ - legacy-tempest-dsvm-full-devstack-plugin-nfs:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-barbican-simple-crypto-dsvm-tempest-nova
+ - legacy-tempest-dsvm-py35-full-devstack-plugin-ceph:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-neutron-pg-full:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ - legacy-tempest-dsvm-neutron-full-opensuse-423:
+ irrelevant-files:
+ - ^(placement-)?api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tests-py3.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+
diff --git a/doc/source/user/feature-matrix-gp.ini b/doc/source/user/feature-matrix-gp.ini
index 6d70f74894..3cd4ba1a20 100644
--- a/doc/source/user/feature-matrix-gp.ini
+++ b/doc/source/user/feature-matrix-gp.ini
@@ -59,7 +59,7 @@ notes=This includes creating a server, and deleting a server.
using the default disk and network configuration.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#servers-servers
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-images-instances.html
+admin_doc_link=https://docs.openstack.org/nova/latest/user/launch-instances.html
tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997
libvirt-kvm=complete
libvirt-kvm-s390=unknown
@@ -78,7 +78,7 @@ title=Snapshot Server
notes=This is creating a glance image from the currently running server.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-images-instances.html
+admin_doc_link=https://docs.openstack.org/glance/latest/admin/troubleshooting.html
tempest_test_uuids=aaacd1d0-55a2-4ce8-818a-b5439df8adc9
cli=
libvirt-kvm=complete
@@ -155,7 +155,7 @@ title=Volume Operations
notes=This is about attaching volumes, detaching volumes.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#servers-with-volume-attachments-servers-os-volume-attachments
-admin_doc_link=http://docs.openstack.org/admin-guide/blockstorage-manage-volumes.html
+admin_doc_link=https://docs.openstack.org/cinder/latest/admin/blockstorage-manage-volumes.html
tempest_test_uuids=fff42874-7db5-4487-a8e1-ddda5fb5288d
cli=
libvirt-kvm=complete
@@ -176,7 +176,7 @@ notes=This is about supporting all the features of BDMv2.
only supports part of what the API allows.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=create-image-createimage-action-detail#create-server
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-manage-volumes.html
+admin_doc_link=https://docs.openstack.org/nova/latest/user/block-device-mapping.html
tempest_test_uuids=557cd2c2-4eb8-4dce-98be-f86765ff311b, 36c34c67-7b54-4b59-b188-02a2f458a63b
cli=
libvirt-kvm=complete
@@ -198,7 +198,7 @@ notes=This is about supporting booting from one or more neutron ports,
This does not include SR-IOV or similar, just simple neutron ports.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/?&expanded=create-server-detail
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-manage-volumes.html
+admin_doc_link=
tempest_test_uuids=2f3a0127-95c7-4977-92d2-bc5aec602fb4
cli=
libvirt-kvm=complete
@@ -220,7 +220,7 @@ title=Pause a Server
notes=This is pause and unpause a server, where the state is held in memory.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/?#pause-server-pause-action
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-images-instances.html
+admin_doc_link=
tempest_test_uuids=bd61a9fd-062f-4670-972b-2d6c3e3b9e73
cli=
libvirt-kvm=complete
@@ -240,7 +240,7 @@ title=Suspend a Server
notes=This suspend and resume a server, where the state is held on disk.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=suspend-server-suspend-action-detail
-admin_doc_link=http://docs.openstack.org/admin-guide/compute-images-instances.html
+admin_doc_link=
tempest_test_uuids=0d8ee21e-b749-462d-83da-b85b41c86c7f
cli=
libvirt-kvm=complete
@@ -260,7 +260,7 @@ title=Server console output
notes=This gets the current server console output.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action
-admin_doc_link=http://docs.openstack.org/user-guide/cli_access_instance_through_a_console.html
+admin_doc_link=
tempest_test_uuids=4b8867e6-fffa-4d54-b1d1-6fdda57be2f3
cli=
libvirt-kvm=complete
@@ -281,7 +281,7 @@ notes=This boots a server with a new root disk from the specified glance image
to allow a user to fix a boot partition configuration, or similar.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#rescue-server-rescue-action
-admin_doc_link=http://docs.openstack.org/user-guide/cli_reboot_an_instance.html
+admin_doc_link=
tempest_test_uuids=fd032140-714c-42e4-a8fd-adcd8df06be6;70cdb8a1-89f8-437d-9448-8844fd82bf46
cli=
libvirt-kvm=complete
@@ -302,7 +302,7 @@ notes=This ensures the user data provided by the user when booting
a server is available in one of the expected config drive locations.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#create-server
-admin_doc_link=http://docs.openstack.org/user-guide/cli_config_drive.html
+admin_doc_link=https://docs.openstack.org/nova/latest/user/config-drive.html
tempest_test_uuids=7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba
cli=
libvirt-kvm=complete
@@ -322,7 +322,7 @@ title=Server Change Password
notes=The ability to reset the password of a user within the server.
maturity=experimental
api_doc_link=http://developer.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action
-admin_doc_link=http://docs.openstack.org/cli-reference/nova.html#nova-set-password
+admin_doc_link=
tempest_test_uuids=6158df09-4b82-4ab3-af6d-29cf36af858d
cli=
libvirt-kvm=partial
@@ -345,7 +345,7 @@ notes=The ability to keep a server logically alive, but not using any
a snapshot, called offloading.
maturity=complete
api_doc_link=http://developer.openstack.org/api-ref/compute/#shelve-server-shelve-action
-admin_doc_link=http://docs.openstack.org/user-guide/cli_stop_and_start_an_instance.html#shelve-and-unshelve-an-instance
+admin_doc_link=
tempest_test_uuids=1164e700-0af0-4a4c-8792-35909a88743c,c1b6318c-b9da-490b-9c67-9339b627271f
cli=
libvirt-kvm=complete
diff --git a/nova/cmd/__init__.py b/nova/cmd/__init__.py
index d1e331c39d..1b1ddf772c 100644
--- a/nova/cmd/__init__.py
+++ b/nova/cmd/__init__.py
@@ -14,6 +14,8 @@
# under the License.
import eventlet
+from oslo_utils import importutils
+from six.moves import reload_module
from nova import debugger
@@ -22,3 +24,9 @@ if debugger.enabled():
eventlet.monkey_patch(os=False, thread=False)
else:
eventlet.monkey_patch(os=False)
+
+# NOTE(rgerganov): oslo.context is storing a global thread-local variable
+# which keeps the request context for the current thread. If oslo.context is
+# imported before calling monkey_patch(), then this thread-local won't be
+# green. To workaround this, reload the module after calling monkey_patch()
+reload_module(importutils.import_module('oslo_context.context'))
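
Aside: the NOTE above is the whole point of this hunk. oslo_context.context builds a threading.local at import time, and a thread-local created before eventlet.monkey_patch() is a real OS-thread local rather than a greenlet-local, so request contexts could bleed between green threads. The following is a minimal standalone sketch, not part of the patch, of what monkey-patching does to threading.local and why the module has to be re-imported afterwards:

    import threading

    # Created before patching: backed by real OS-thread storage.
    pre_patch_local = threading.local()

    import eventlet
    eventlet.monkey_patch()            # swaps threading.local for a green-aware class

    # Created after patching: backed by greenlet-local storage.
    post_patch_local = threading.local()

    # On a typical eventlet install the two objects come from different
    # classes. A module that built its thread-local before monkey_patch()
    # keeps the old, non-green one, which is why nova reloads
    # oslo_context.context once patching is done.
    print(type(pre_patch_local))
    print(type(post_patch_local))
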
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cec2874a57..88e0f6bc9b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1708,16 +1708,16 @@ class ComputeManager(manager.Manager):
return block_device_info
- def _build_failed(self):
+ def _build_failed(self, node):
if CONF.compute.consecutive_build_service_disable_threshold:
rt = self._get_resource_tracker()
# NOTE(danms): Update our counter, but wait for the next
# update_available_resource() periodic to flush it to the DB
- rt.stats.build_failed()
+ rt.build_failed(node)
- def _build_succeeded(self):
+ def _build_succeeded(self, node):
rt = self._get_resource_tracker()
- rt.stats.build_succeeded()
+ rt.build_succeeded(node)
@wrap_exception()
@reverts_task_state
@@ -1728,6 +1728,11 @@ class ComputeManager(manager.Manager):
security_groups=None, block_device_mapping=None,
node=None, limits=None):
+ if node is None:
+ node = self.driver.get_available_nodes(refresh=True)[0]
+ LOG.debug('No node specified, defaulting to %s', node,
+ instance=instance)
+
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
@@ -1766,9 +1771,9 @@ class ComputeManager(manager.Manager):
rt.reportclient.delete_allocation_for_instance(
instance.uuid)
- self._build_failed()
+ self._build_failed(node)
else:
- self._build_succeeded()
+ self._build_succeeded(node)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
@@ -1827,11 +1832,6 @@ class ComputeManager(manager.Manager):
if limits is None:
limits = {}
- if node is None:
- node = self.driver.get_available_nodes(refresh=True)[0]
- LOG.debug('No node specified, defaulting to %s', node,
- instance=instance)
-
try:
with timeutils.StopWatch() as timer:
self._build_and_run_instance(context, instance, image,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index f5c5f3ba64..5d946333a4 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -26,7 +26,7 @@ from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
-from nova.compute import stats
+from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
@@ -136,7 +136,8 @@ class ResourceTracker(object):
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
- self.stats = stats.Stats()
+ # Dict of Stats objects, keyed by nodename
+ self.stats = collections.defaultdict(compute_stats.Stats)
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
@@ -627,16 +628,18 @@ class ResourceTracker(object):
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
+ nodename = resources['hypervisor_hostname']
+ stats = self.stats[nodename]
# purge old stats and init with anything passed in by the driver
# NOTE(danms): Preserve 'failed_builds' across the stats clearing,
# as that is not part of resources
# TODO(danms): Stop doing this when we get a column to store this
# directly
- prev_failed_builds = self.stats.get('failed_builds', 0)
- self.stats.clear()
- self.stats['failed_builds'] = prev_failed_builds
- self.stats.digest_stats(resources.get('stats'))
- compute_node.stats = copy.deepcopy(self.stats)
+ prev_failed_builds = stats.get('failed_builds', 0)
+ stats.clear()
+ stats['failed_builds'] = prev_failed_builds
+ stats.digest_stats(resources.get('stats'))
+ compute_node.stats = stats
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
@@ -920,7 +923,8 @@ class ResourceTracker(object):
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
- cn.running_vms = self.stats.num_instances
+ stats = self.stats[nodename]
+ cn.running_vms = stats.num_instances
# Calculate the numa usage
free = sign == -1
@@ -1082,8 +1086,9 @@ class ResourceTracker(object):
sign = -1
cn = self.compute_nodes[nodename]
- self.stats.update_stats_for_instance(instance, is_removed_instance)
- cn.stats = copy.deepcopy(self.stats)
+ stats = self.stats[nodename]
+ stats.update_stats_for_instance(instance, is_removed_instance)
+ cn.stats = stats
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
@@ -1099,7 +1104,7 @@ class ResourceTracker(object):
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
- cn.current_workload = self.stats.calculate_workload()
+ cn.current_workload = stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
@@ -1428,3 +1433,11 @@ class ResourceTracker(object):
if key in updates:
usage[key] = updates[key]
return usage
+
+ def build_failed(self, nodename):
+ """Increments the failed_builds stats for the given node."""
+ self.stats[nodename].build_failed()
+
+ def build_succeeded(self, nodename):
+ """Resets the failed_builds stats for the given node."""
+ self.stats[nodename].build_succeeded()
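
Aside: the core of this fix is that self.stats changes from one Stats object shared by every node the tracker manages to a collections.defaultdict keyed by nodename, so a Stats object is created lazily per node and build-failure counters stop leaking between the Ironic nodes behind a single ResourceTracker. A toy illustration of that keying, with a plain dict subclass standing in for nova.compute.stats.Stats:

    import collections


    class Stats(dict):
        """Stand-in for nova.compute.stats.Stats: a dict with a failure counter."""

        def build_failed(self):
            self['failed_builds'] = self.get('failed_builds', 0) + 1

        def build_succeeded(self):
            self.pop('failed_builds', None)


    stats = collections.defaultdict(Stats)

    stats['node-1'].build_failed()
    stats['node-1'].build_failed()
    stats['node-2'].build_succeeded()

    # Each nodename owns an independent Stats object, so node-2 is untouched
    # by node-1's failures.
    assert stats['node-1']['failed_builds'] == 2
    assert 'failed_builds' not in stats['node-2']
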
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0789515e74..b8d460f155 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1833,11 +1833,15 @@ def instance_create(context, values):
def _instance_data_get_for_user(context, project_id, user_id):
+ not_soft_deleted = or_(
+ models.Instance.vm_state != vm_states.SOFT_DELETED,
+ models.Instance.vm_state == null()
+ )
result = model_query(context, models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb))).\
- filter_by(project_id=project_id)
+ filter_by(project_id=project_id).filter(not_soft_deleted)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
@@ -6381,7 +6385,7 @@ def _archive_if_instance_deleted(table, shadow_table, instances, conn,
return result_delete.rowcount
except db_exc.DBReferenceError as ex:
LOG.warning('Failed to archive %(table)s: %(error)s',
- {'table': table.__tablename__,
+ {'table': table.name,
'error': six.text_type(ex)})
return 0
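
Aside: the new predicate keeps SOFT_DELETED instances out of the per-project/per-user usage sums while still counting rows whose vm_state is NULL, which a bare `!=` comparison would silently drop under SQL's three-valued logic. A self-contained sketch of the same predicate against a throwaway table (SQLAlchemy 1.4-style imports; the table, column and state values are illustrative, not nova's models):

    from sqlalchemy import Column, Integer, String, create_engine, func, null, or_
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()


    class Instance(Base):
        __tablename__ = 'instances'
        id = Column(Integer, primary_key=True)
        vm_state = Column(String, nullable=True)


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        session.add_all([Instance(vm_state='active'),
                         Instance(vm_state='soft-delete'),
                         Instance(vm_state=None)])
        session.commit()

        # "vm_state != 'soft-delete'" alone would drop the NULL row, because
        # NULL != x evaluates to NULL in SQL; the explicit IS NULL branch
        # keeps such rows in the count.
        not_soft_deleted = or_(Instance.vm_state != 'soft-delete',
                               Instance.vm_state == null())
        count = session.query(func.count(Instance.id)).filter(
            not_soft_deleted).scalar()
        assert count == 2
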
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index 1810c12a94..da6df09ad1 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -1222,7 +1222,10 @@ def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
@db_api.pick_context_manager_writer
def populate_missing_availability_zones(context, count):
+ # instances without host have no reasonable AZ to set
+ not_empty_host = models.Instance.host != None # noqa E711
instances = (context.session.query(models.Instance).
+ filter(not_empty_host).
filter_by(availability_zone=None).limit(count).all())
count_all = len(instances)
count_hit = 0
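
Aside: the `!= None` spelling (hence the `noqa E711` marker) is deliberate. Inside a SQLAlchemy filter the comparison has to produce an `IS NOT NULL` SQL expression, whereas the PEP 8-preferred `is not None` would only test the Column object itself and evaluate to a plain True. A tiny sketch of the difference, on an illustrative model rather than nova's:

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()


    class Instance(Base):
        __tablename__ = 'instances'
        id = Column(Integer, primary_key=True)
        host = Column(String, nullable=True)


    # The overloaded operator builds a SQL expression...
    print(Instance.host != None)      # noqa: E711  -> "instances.host IS NOT NULL"
    # ...while the identity test is just a Python boolean and would be useless
    # (or an error) inside Query.filter().
    print(Instance.host is not None)  # -> True
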
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 1c64998de3..a51ae5be77 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -497,6 +497,7 @@ class RequestSpec(base.NovaObject):
it was originally scheduled with.
"""
updates = self.obj_get_changes()
+ db_updates = None
# NOTE(alaski): The db schema is the full serialized object in a
# 'spec' column. If anything has changed we rewrite the full thing.
if updates:
@@ -522,7 +523,9 @@ class RequestSpec(base.NovaObject):
reason='already created')
updates = self._get_update_primitives()
-
+ if not updates:
+ raise exception.ObjectActionError(action='create',
+ reason='no fields are set')
db_spec = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_spec)
@@ -540,9 +543,11 @@ class RequestSpec(base.NovaObject):
@base.remotable
def save(self):
updates = self._get_update_primitives()
- db_spec = self._save_in_db(self._context, self.instance_uuid, updates)
- self._from_db_object(self._context, self, db_spec)
- self.obj_reset_changes()
+ if updates:
+ db_spec = self._save_in_db(self._context, self.instance_uuid,
+ updates)
+ self._from_db_object(self._context, self, db_spec)
+ self.obj_reset_changes()
@staticmethod
@db.api_context_manager.writer
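
Aside: together these two hunks make RequestSpec safer to use with empty change sets. create() now refuses to persist an object with no fields set, and save() skips the database round-trip entirely when obj_get_changes() reports nothing dirty. A toy stand-in, not nova's real versioned-object machinery, showing the intended calling behaviour:

    class ObjectActionError(Exception):
        pass


    class FakeSpec(object):
        """Toy stand-in for RequestSpec to show the create()/save() guards."""

        def __init__(self):
            self._changes = {}

        def obj_get_changes(self):
            return dict(self._changes)

        def create(self):
            updates = self.obj_get_changes()
            if not updates:
                # Mirrors the patched RequestSpec.create(): nothing to persist.
                raise ObjectActionError('create: no fields are set')
            print('INSERT', updates)
            self._changes.clear()

        def save(self):
            updates = self.obj_get_changes()
            if not updates:
                return  # nothing changed; skip the DB write entirely
            print('UPDATE', updates)
            self._changes.clear()


    spec = FakeSpec()
    spec.save()                     # no-op, no DB call
    try:
        spec.create()               # raises: no fields are set
    except ObjectActionError as exc:
        print(exc)
    spec._changes['num_instances'] = 1
    spec.create()                   # persists the single changed field
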
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 6fcd979a5f..4feffe4530 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -376,7 +376,7 @@ class HostManager(object):
for agg in aggs:
self.aggs_by_id[agg.id] = agg
for host in agg.hosts:
- self.host_aggregates_map[host].add(agg.id)
+ self.host_aggregates_map[host.lower()].add(agg.id)
def update_aggregates(self, aggregates):
"""Updates internal HostManager information about aggregates."""
@@ -395,7 +395,7 @@ class HostManager(object):
for host in self.host_aggregates_map:
if (aggregate.id in self.host_aggregates_map[host]
and host not in aggregate.hosts):
- self.host_aggregates_map[host].remove(aggregate.id)
+ self.host_aggregates_map[host.lower()].remove(aggregate.id)
def delete_aggregate(self, aggregate):
"""Deletes internal HostManager information about a specific aggregate.
@@ -714,7 +714,7 @@ class HostManager(object):
def _get_aggregates_info(self, host):
return [self.aggs_by_id[agg_id] for agg_id in
- self.host_aggregates_map[host]]
+ self.host_aggregates_map[host.lower()]]
def _get_instances_by_host(self, context, host_name):
try:
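
Aside: every writer and reader of host_aggregates_map now lower-cases the host key, so aggregate membership keeps working when the compute service host name and the name stored on the aggregate differ only in case. A small sketch of the normalisation, using a defaultdict of sets as HostManager does:

    import collections

    host_aggregates_map = collections.defaultdict(set)


    def add_host_to_aggregate(host, agg_id):
        # Normalise the key on every write...
        host_aggregates_map[host.lower()].add(agg_id)


    def aggregates_for_host(host):
        # ...and on every read, so 'FAKE-host' and 'fake-host' hit the
        # same entry.
        return host_aggregates_map[host.lower()]


    add_host_to_aggregate('FAKE-host', 1)
    assert aggregates_for_host('fake-host') == {1}
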
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 9559ceaee2..d156feb26a 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -23,6 +23,7 @@ from nova import context
from nova import objects
from nova.objects import fields
from nova import test
+from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api.openstack.placement import test_report_client
from nova.tests import uuidsentinel as uuids
@@ -150,10 +151,13 @@ class IronicResourceTrackerTest(test.TestCase):
self.rt.scheduler_client.reportclient = self.report_client
self.rt.reportclient = self.report_client
self.url = 'http://localhost/placement'
- self.create_fixtures()
+ self.instances = self.create_fixtures()
def create_fixtures(self):
for flavor in self.FLAVOR_FIXTURES.values():
+ # Clone the object so the class variable isn't
+ # modified by reference.
+ flavor = flavor.obj_clone()
flavor._context = self.ctx
flavor.obj_set_defaults()
flavor.create()
@@ -162,14 +166,23 @@ class IronicResourceTrackerTest(test.TestCase):
# data before adding integration for Ironic baremetal nodes with the
# placement API...
for cn in self.COMPUTE_NODE_FIXTURES.values():
+ # Clone the object so the class variable isn't
+ # modified by reference.
+ cn = cn.obj_clone()
cn._context = self.ctx
cn.obj_set_defaults()
cn.create()
+ instances = {}
for instance in self.INSTANCE_FIXTURES.values():
+ # Clone the object so the class variable isn't
+ # modified by reference.
+ instance = instance.obj_clone()
instance._context = self.ctx
instance.obj_set_defaults()
instance.create()
+ instances[instance.uuid] = instance
+ return instances
def placement_get_inventory(self, rp_uuid):
url = '/resource_providers/%s/inventories' % rp_uuid
@@ -293,7 +306,7 @@ class IronicResourceTrackerTest(test.TestCase):
# RT's instance_claim().
cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
cn1_nodename = cn1_obj.hypervisor_hostname
- inst = self.INSTANCE_FIXTURES[uuids.instance1]
+ inst = self.instances[uuids.instance1]
# Since we're pike, the scheduler would have created our
# allocation for us. So, we can use our old update routine
# here to mimic that before we go do the compute RT claim,
@@ -390,3 +403,81 @@ class IronicResourceTrackerTest(test.TestCase):
# request a single amount of that custom resource class, we will
# modify the allocation/claim to consume only the custom resource
# class and not the VCPU, MEMORY_MB and DISK_GB.
+
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
+ @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
+ def test_node_stats_isolation(self):
+ """Regression test for bug 1784705 introduced in Ocata.
+
+ The ResourceTracker.stats field is meant to track per-node stats
+ so this test registers three compute nodes with a single RT where
+ each node has unique stats, and then makes sure that after updating
+ usage for an instance, the nodes still have their unique stats and
+ nothing is leaked from node to node.
+ """
+ self.useFixture(nova_fixtures.PlacementFixture())
+ # Before the resource tracker is "initialized", we shouldn't have
+ # any compute nodes or stats in the RT's cache...
+ self.assertEqual(0, len(self.rt.compute_nodes))
+ self.assertEqual(0, len(self.rt.stats))
+
+ # Now "initialize" the resource tracker. This is what
+ # nova.compute.manager.ComputeManager does when "initializing" the
+ # nova-compute service. Do this in a predictable order so cn1 is
+ # first and cn3 is last.
+ for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
+ key=lambda _cn: _cn.hypervisor_hostname):
+ nodename = cn.hypervisor_hostname
+ # Fake that each compute node has unique extra specs stats and
+ # the RT makes sure those are unique per node.
+ stats = {'node:%s' % nodename: nodename}
+ self.driver_mock.get_available_resource.return_value = {
+ 'hypervisor_hostname': nodename,
+ 'hypervisor_type': 'ironic',
+ 'hypervisor_version': 0,
+ 'vcpus': cn.vcpus,
+ 'vcpus_used': cn.vcpus_used,
+ 'memory_mb': cn.memory_mb,
+ 'memory_mb_used': cn.memory_mb_used,
+ 'local_gb': cn.local_gb,
+ 'local_gb_used': cn.local_gb_used,
+ 'numa_topology': None,
+ 'resource_class': None, # Act like admin hasn't set yet...
+ 'stats': stats,
+ }
+ self.driver_mock.get_inventory.return_value = {
+ 'CUSTOM_SMALL_IRON': {
+ 'total': 1,
+ 'reserved': 0,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ self.rt.update_available_resource(self.ctx, nodename)
+
+ self.assertEqual(3, len(self.rt.compute_nodes))
+ self.assertEqual(3, len(self.rt.stats))
+
+ def _assert_stats():
+ # Make sure each compute node has a unique set of stats and
+ # they don't accumulate across nodes.
+ for _cn in self.rt.compute_nodes.values():
+ node_stats_key = 'node:%s' % _cn.hypervisor_hostname
+ self.assertIn(node_stats_key, _cn.stats)
+ node_stat_count = 0
+ for stat in _cn.stats:
+ if stat.startswith('node:'):
+ node_stat_count += 1
+ self.assertEqual(1, node_stat_count, _cn.stats)
+ _assert_stats()
+
+ # Now "spawn" an instance to the first compute node by calling the
+ # RT's instance_claim().
+ cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
+ cn1_nodename = cn1_obj.hypervisor_hostname
+ inst = self.instances[uuids.instance1]
+ with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
+ _assert_stats()
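
Aside: the obj_clone() calls added throughout create_fixtures() matter because FLAVOR_FIXTURES, COMPUTE_NODE_FIXTURES and INSTANCE_FIXTURES are class attributes; creating and claiming them directly would mutate the shared objects by reference and leak state into later tests. A generic illustration of that pitfall, with plain dicts standing in for the o.vo fixture objects:

    import copy


    class FixtureUser(object):
        # Shared across every instance of the class, like the *_FIXTURES
        # dicts on the test case.
        FIXTURE = {'vcpus_used': 0}

        def use_fixture_badly(self):
            flavor = self.FIXTURE               # same object, by reference
            flavor['vcpus_used'] += 1           # mutates the class attribute!

        def use_fixture_safely(self):
            flavor = copy.deepcopy(self.FIXTURE)  # analogue of obj_clone()
            flavor['vcpus_used'] += 1
            return flavor


    a, b = FixtureUser(), FixtureUser()
    a.use_fixture_badly()
    assert FixtureUser.FIXTURE['vcpus_used'] == 1   # leaked into shared state
    b.use_fixture_safely()
    assert FixtureUser.FIXTURE['vcpus_used'] == 1   # untouched by the safe path
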
diff --git a/nova/tests/functional/db/test_instance.py b/nova/tests/functional/db/test_instance.py
index 38b530134a..4394f557b4 100644
--- a/nova/tests/functional/db/test_instance.py
+++ b/nova/tests/functional/db/test_instance.py
@@ -50,17 +50,20 @@ class InstanceObjectTestCase(test.TestCase):
uuid1 = inst1.uuid
inst2 = self._create_instance(availability_zone="fake",
host="fake-host2")
+ # ... and one without a host (simulating failed spawn)
+ self._create_instance(host=None)
+
self.assertIsNone(inst1.availability_zone)
self.assertEqual("fake", inst2.availability_zone)
count_all, count_hit = (objects.instance.
populate_missing_availability_zones(self.context, 10))
- # we get only the instance whose avz was None.
+ # we get only the instance whose avz was None and where host is set
self.assertEqual(1, count_all)
self.assertEqual(1, count_hit)
+ # since instance has no avz, avz is set by get_host_availability_zone
+ # to CONF.default_availability_zone i.e 'nova' which is the default
+ # zone for compute services.
inst1 = objects.Instance.get_by_uuid(self.context, uuid1)
- # since instance.host was None, avz is set to
- # CONF.default_availability_zone i.e 'nova' which is the default zone
- # for compute services.
self.assertEqual('nova', inst1.availability_zone)
# create an instance with avz as None on a host that has avz.
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index 4751d7bb0f..a01440cf44 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -1347,6 +1347,24 @@ class SqlAlchemyDbApiTestCase(DbTestCase):
filters={},
sort_keys=keys)
+ def test_instance_data_get_for_user(self):
+ ctxt = context.get_admin_context()
+ instance_1 = self.create_instance_with_args(project_id='project-HHD')
+ self.create_instance_with_args(project_id='project-HHD')
+
+ @sqlalchemy_api.pick_context_manager_reader
+ def test(context):
+ return sqlalchemy_api._instance_data_get_for_user(
+ context, 'project-HHD', None)
+
+ inst_num, _, _ = test(ctxt)
+ self.assertEqual(2, inst_num)
+
+ db.instance_update(ctxt, instance_1['uuid'],
+ {"vm_state": vm_states.SOFT_DELETED})
+ inst_num_2, _, _ = test(ctxt)
+ self.assertEqual(1, inst_num_2)
+
class ProcessSortParamTestCase(test.TestCase):
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 36f2b6be7e..fb87312e3a 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -635,6 +635,16 @@ class _TestRequestSpecObject(object):
objects.SecurityGroupList)
self.assertIn('security_groups', req_obj)
+ def test_create_raises_on_unchanged_object(self):
+ ctxt = context.RequestContext(uuids.user_id, uuids.project_id)
+ req_obj = request_spec.RequestSpec(context=ctxt)
+ self.assertRaises(exception.ObjectActionError, req_obj.create)
+
+ def test_save_can_be_called_on_unchanged_object(self):
+ req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
+ req_obj.create()
+ req_obj.save()
+
class TestRequestSpecObject(test_objects._LocalTest,
_TestRequestSpecObject):
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 42a263c323..c147780ff1 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -173,6 +173,17 @@ class HostManagerTestCase(test.NoDBTestCase):
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(objects.AggregateList, 'get_all')
+ def test_init_aggregates_one_agg_with_hosts_upper_case(self, agg_get_all,
+ mock_init_info):
+ fake_agg = objects.Aggregate(id=1, hosts=['FAKE-host'])
+ agg_get_all.return_value = [fake_agg]
+ self.host_manager = host_manager.HostManager()
+ self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
+ self.assertEqual({'fake-host': set([1])},
+ self.host_manager.host_aggregates_map)
+
def test_update_aggregates(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.update_aggregates([fake_agg])
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
index 306f1150b8..c64ad32ef6 100644
--- a/nova/tests/unit/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -776,6 +776,9 @@ class Domain(object):
def migrateSetMaxDowntime(self, downtime):
pass
+ def migrateSetMaxSpeed(self, bandwidth):
+ pass
+
def attachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index baead43ab2..cb7091568c 100755
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -8511,7 +8511,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
guest = libvirt_guest.Guest(vdmock)
self.mox.StubOutWithMock(vdmock, "migrateToURI2")
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
initial_xml)
vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
@@ -8533,8 +8533,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, guest, [],
- _bandwidth)
+ False, migrate_data, guest, [])
def test_live_migration_parallels_no_new_xml(self):
self.flags(virt_type='parallels', group='libvirt')
@@ -8549,14 +8548,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_migration=False)
dom_mock = mock.MagicMock()
guest = libvirt_guest.Guest(dom_mock)
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
drvr._live_migration_operation(self.context, instance, 'dest',
- False, migrate_data, guest, [],
- bandwidth=_bandwidth)
+ False, migrate_data, guest, [])
# when new xml is not passed we fall back to migrateToURI
dom_mock.migrateToURI.assert_called_once_with(
drvr._live_migration_uri('dest'),
- flags=0, bandwidth=_bandwidth)
+ flags=0, bandwidth=0)
@mock.patch.object(utils, 'spawn')
@mock.patch.object(host.Host, 'get_guest')
@@ -8578,13 +8575,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance = objects.Instance(**instance_dict)
- instance.info_cache = objects.InstanceInfoCache(
- network_info=_fake_network_info(self, 1))
migrate_data = objects.LibvirtLiveMigrateData(
block_migration=True)
dom = fakelibvirt.Domain(drvr._get_connection(), '<domain/>', True)
guest = libvirt_guest.Guest(dom)
- guest.migrate_configure_max_speed = mock.MagicMock()
mock_guest.return_value = guest
drvr._live_migration(self.context, instance, 'dest',
lambda: None, lambda: None, True,
@@ -8593,9 +8587,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, 'dest', True,
- migrate_data, guest, [], libvirt_driver.MIN_MIGRATION_SPEED_BW)
- guest.migrate_configure_max_speed.assert_called_once_with(
- CONF.libvirt.live_migration_bandwidth)
+ migrate_data, guest, [])
def test_live_migration_update_volume_xml(self):
self.compute = manager.ComputeManager()
@@ -8647,8 +8639,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_mock.XMLDesc.return_value = target_xml
self.assertFalse(drvr._live_migration_operation(
self.context, instance_ref, 'dest', False,
- migrate_data, guest, [],
- libvirt_driver.MIN_MIGRATION_SPEED_BW))
+ migrate_data, guest, []))
mupdate.assert_called_once_with(
guest, migrate_data, mock.ANY)
@@ -8688,7 +8679,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
test_mock = mock.MagicMock()
guest = libvirt_guest.Guest(test_mock)
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
with mock.patch.object(libvirt_migrate,
'get_updated_guest_xml') as mupdate:
@@ -8696,11 +8686,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_mock.XMLDesc.return_value = target_xml
drvr._live_migration_operation(self.context, instance_ref,
'dest', False, migrate_data,
- guest, [], _bandwidth)
+ guest, [])
test_mock.migrateToURI2.assert_called_once_with(
'qemu+tcp://127.0.0.2/system',
miguri='tcp://127.0.0.2',
- dxml=mupdate(), flags=0, bandwidth=_bandwidth)
+ dxml=mupdate(), flags=0, bandwidth=0)
def test_update_volume_xml(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -8961,7 +8951,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_migrate.side_effect = fakelibvirt.libvirtError("ERR")
# start test
- bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
+ bandwidth = CONF.libvirt.live_migration_bandwidth
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
@@ -8976,8 +8966,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, guest, [],
- bandwidth=bandwidth)
+ False, migrate_data, guest, [])
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrate.assert_called_once_with(
@@ -9009,8 +8998,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, guest, [],
- bandwidth=libvirt_driver.MIN_MIGRATION_SPEED_BW)
+ False, migrate_data, guest, [])
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@@ -9025,7 +9013,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
disk_paths = ['vda', 'vdb']
params = {
'migrate_disks': ['vda', 'vdb'],
- 'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
+ 'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '',
}
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
@@ -9047,8 +9035,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance, 'dest',
- False, migrate_data, guest, disk_paths,
- libvirt_driver.MIN_MIGRATION_SPEED_BW)
+ False, migrate_data, guest, disk_paths)
mock_migrateToURI3.assert_called_once_with(
drvr._live_migration_uri('dest'),
params=params, flags=0)
@@ -9073,15 +9060,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._parse_migration_flags()
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance, 'dest',
True, migrate_data, guest,
- device_names, _bandwidth)
+ device_names)
params = {
'migrate_disks': device_names,
- 'bandwidth': _bandwidth,
+ 'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': b'<xml/>',
}
mock_migrateToURI3.assert_called_once_with(
@@ -9120,9 +9106,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(live_migration_tunnelled=True, group='libvirt')
# Preparing mocks
disk_paths = []
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
params = {
- 'bandwidth': _bandwidth,
+ 'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': '',
}
# Start test
@@ -9141,8 +9126,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._parse_migration_flags()
instance = objects.Instance(**self.test_instance)
drvr._live_migration_operation(self.context, instance, 'dest',
- True, migrate_data, guest, disk_paths,
- _bandwidth)
+ True, migrate_data, guest, disk_paths)
expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
fakelibvirt.VIR_MIGRATE_TUNNELLED |
@@ -9168,7 +9152,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
guest = libvirt_guest.Guest(vdmock)
self.mox.StubOutWithMock(vdmock, "migrateToURI2")
- _bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
@@ -9190,8 +9174,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, guest, [],
- _bandwidth)
+ False, migrate_data, guest, [])
self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
self.assertEqual(power_state.RUNNING, instance_ref.power_state)
@@ -10226,87 +10209,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main(self, mock_copy_disk_path, mock_running,
mock_guest, mock_monitor, mock_thread,
mock_conn):
- virtapi = manager.ComputeVirtAPI(mock.MagicMock())
- drvr = libvirt_driver.LibvirtDriver(virtapi, False)
-
- instance = objects.Instance(**self.test_instance)
- instance.info_cache = objects.InstanceInfoCache(
- network_info=network_model.NetworkInfo([
- network_model.VIF(id=uuids.vif_1,
- type=network_model.VIF_TYPE_BRIDGE)]))
-
- dom = fakelibvirt.Domain(drvr._get_connection(),
- "<domain><name>demo</name></domain>", True)
- guest = libvirt_guest.Guest(dom)
- migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
- disks_to_copy = (['/some/path/one', '/test/path/two'],
- ['vda', 'vdb'])
- mock_copy_disk_path.return_value = disks_to_copy
-
- mock_guest.return_value = guest
- guest.migrate_configure_max_speed = mock.MagicMock()
-
- generated_events = []
-
- def fake_post():
- pass
-
- def fake_recover():
- pass
-
- def fake_prepare(instance, event_name):
- ev = mock.MagicMock(instance=instance, event_name=event_name)
- ev.wait.return_value = mock.MagicMock(status='completed')
- generated_events.append(ev)
- return ev
-
- prepare = virtapi._compute.instance_events.prepare_for_instance_event
- prepare.side_effect = fake_prepare
-
- drvr._live_migration(self.context, instance, "fakehost",
- fake_post, fake_recover, True,
- migrate_data)
- mock_copy_disk_path.assert_called_once_with(self.context, instance,
- guest)
-
- class AnyEventletEvent(object):
- def __eq__(self, other):
- return type(other) == eventlet.event.Event
-
- mock_thread.assert_called_once_with(
- drvr._live_migration_operation,
- self.context, instance, "fakehost", True,
- migrate_data, guest, disks_to_copy[1],
- libvirt_driver.MIN_MIGRATION_SPEED_BW)
- mock_monitor.assert_called_once_with(
- self.context, instance, guest, "fakehost",
- fake_post, fake_recover, True,
- migrate_data, AnyEventletEvent(), disks_to_copy[0])
- guest.migrate_configure_max_speed.assert_called_once_with(
- CONF.libvirt.live_migration_bandwidth)
-
- prepare.assert_has_calls([
- mock.call(instance, 'network-vif-plugged-%s' % uuids.vif_1)])
- for event in generated_events:
- event.wait.assert_called_once_with()
-
- @mock.patch.object(host.Host, "get_connection")
- @mock.patch.object(utils, "spawn")
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
- @mock.patch.object(host.Host, "get_guest")
- @mock.patch.object(fakelibvirt.Connection, "_mark_running")
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_live_migration_copy_disk_paths")
- def test_live_migration_ovs_vif(self, mock_copy_disk_path, mock_running,
- mock_guest, mock_monitor, mock_thread,
- mock_conn):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
- instance.info_cache = objects.InstanceInfoCache(
- network_info=network_model.NetworkInfo([
- network_model.VIF(id=uuids.vif_1,
- type=network_model.VIF_TYPE_OVS)]))
-
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
@@ -10316,7 +10220,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
- guest.migrate_configure_max_speed = mock.MagicMock()
def fake_post():
pass
@@ -10337,70 +10240,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
- migrate_data, guest, disks_to_copy[1],
- CONF.libvirt.live_migration_bandwidth)
+ migrate_data, guest, disks_to_copy[1])
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
migrate_data, AnyEventletEvent(), disks_to_copy[0])
- guest.migrate_configure_max_speed.assert_not_called()
-
- @mock.patch.object(host.Host, "get_connection")
- @mock.patch.object(utils, "spawn")
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
- @mock.patch.object(host.Host, "get_guest")
- @mock.patch.object(fakelibvirt.Connection, "_mark_running")
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_live_migration_copy_disk_paths")
- def test_live_migration_bridge_no_events(self, mock_copy_disk_path,
- mock_running, mock_guest,
- mock_monitor, mock_thread,
- mock_conn):
- self.flags(vif_plugging_timeout=0)
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(**self.test_instance)
- instance.info_cache = objects.InstanceInfoCache(
- network_info=network_model.NetworkInfo([
- network_model.VIF(id=uuids.vif_1,
- type=network_model.VIF_TYPE_BRIDGE)]))
-
- dom = fakelibvirt.Domain(drvr._get_connection(),
- "<domain><name>demo</name></domain>", True)
- guest = libvirt_guest.Guest(dom)
- migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
- disks_to_copy = (['/some/path/one', '/test/path/two'],
- ['vda', 'vdb'])
- mock_copy_disk_path.return_value = disks_to_copy
-
- mock_guest.return_value = guest
- guest.migrate_configure_max_speed = mock.MagicMock()
-
- def fake_post():
- pass
-
- def fake_recover():
- pass
-
- drvr._live_migration(self.context, instance, "fakehost",
- fake_post, fake_recover, True,
- migrate_data)
- mock_copy_disk_path.assert_called_once_with(self.context, instance,
- guest)
-
- class AnyEventletEvent(object):
- def __eq__(self, other):
- return type(other) == eventlet.event.Event
-
- mock_thread.assert_called_once_with(
- drvr._live_migration_operation,
- self.context, instance, "fakehost", True,
- migrate_data, guest, disks_to_copy[1],
- CONF.libvirt.live_migration_bandwidth)
- mock_monitor.assert_called_once_with(
- self.context, instance, guest, "fakehost",
- fake_post, fake_recover, True,
- migrate_data, AnyEventletEvent(), disks_to_copy[0])
- guest.migrate_configure_max_speed.assert_not_called()
def _do_test_create_images_and_backing(self, disk_type):
instance = objects.Instance(**self.test_instance)
@@ -15413,8 +15257,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(vm_state=vm_states.BUILDING,
**self.test_instance)
- vifs = [{'id': uuids.vif_1, 'active': False},
- {'id': uuids.vif_2, 'active': False}]
+ vifs = [{'id': 'vif1', 'active': False},
+ {'id': 'vif2', 'active': False}]
@mock.patch.object(drvr, 'plug_vifs')
@mock.patch.object(drvr, 'firewall_driver')
@@ -15440,8 +15284,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
prepare.assert_has_calls([
- mock.call(instance, 'network-vif-plugged-%s' % uuids.vif_1),
- mock.call(instance, 'network-vif-plugged-%s' % uuids.vif_2)])
+ mock.call(instance, 'network-vif-plugged-vif1'),
+ mock.call(instance, 'network-vif-plugged-vif2')])
for event in generated_events:
if neutron_failure and generated_events.index(event) != 0:
self.assertEqual(0, event.call_count)
@@ -15685,15 +15529,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
{'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
volume_save.assert_called_once_with()
- def test_get_neutron_events_for_live_migration(self):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- network_info = [network_model.VIF(id=uuids.vif_ovs,
- type=network_model.VIF_TYPE_OVS),
- network_model.VIF(id=uuids.vif_bridge,
- type=network_model.VIF_TYPE_BRIDGE)]
- events = drvr._get_neutron_events_for_live_migration(network_info)
- self.assertEqual([('network-vif-plugged', uuids.vif_bridge)], events)
-
def test_get_neutron_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 0bc8273cdf..ac22cd4151 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -662,6 +662,10 @@ class GuestTestCase(test.NoDBTestCase):
self.guest.migrate_configure_max_downtime(1000)
self.domain.migrateSetMaxDowntime.assert_called_once_with(1000)
+ def test_migrate_configure_max_speed(self):
+ self.guest.migrate_configure_max_speed(1000)
+ self.domain.migrateSetMaxSpeed.assert_called_once_with(1000)
+
class GuestBlockTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index 1e7f2ee7aa..78c96e6998 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -239,6 +239,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.context, inst, vol_api, virt)
self.assertFalse(log.exception.called)
self.assertTrue(log.warning.called)
+ vol_api.roll_detaching.assert_called_once_with(self.context,
+ driver_bdm.volume_id)
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 570186af3d..0c32b1f2fd 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -649,8 +649,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=network_info)
fake_context = context.RequestContext('fake', 'fake')
migration = objects.Migration(context=fake_context, id=1)
migrate_data = objects.LibvirtLiveMigrateData(
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index aecb342f0f..fdcd9fa184 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -285,6 +285,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
with excutils.save_and_reraise_exception():
LOG.warning('Guest refused to detach volume %(vol)s',
{'vol': volume_id}, instance=instance)
+ volume_api.roll_detaching(context, volume_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to detach volume '
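
Aside: without the new call, a guest that refuses to release the device leaves the Cinder volume stuck in 'detaching'; roll_detaching() puts it back to 'in-use' before save_and_reraise_exception() re-raises the original error. A trimmed sketch of that control flow (the driver call and exception class are placeholders, not nova's real ones; roll_detaching mirrors the Cinder API method of the same name):

    from oslo_utils import excutils


    class GuestRefusedDetach(Exception):
        """Stand-in for the driver-level 'device detach failed' error."""


    def detach_volume(volume_api, virt_driver, context, instance,
                      connection_info, volume_id):
        try:
            virt_driver.detach_volume(connection_info, instance)
        except GuestRefusedDetach:
            with excutils.save_and_reraise_exception():
                # New in this patch: roll the Cinder volume back from
                # 'detaching' to 'in-use' before re-raising, so it is not
                # left stuck when the guest refuses to release the device.
                volume_api.roll_detaching(context, volume_id)
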
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 03c62b12a5..d3f945590d 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -301,8 +301,6 @@ PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
'mbmt': 'mbm_total',
}
-MIN_MIGRATION_SPEED_BW = 1 # 1 MiB/s
-
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
@@ -5354,12 +5352,6 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
- def _neutron_failed_live_migration_callback(self, event_name, instance):
- msg = ('Neutron reported failure during live migration '
- 'with %(event)s for instance %(uuid)s' %
- {'event': event_name, 'uuid': instance.uuid})
- raise exception.MigrationError(reason=msg)
-
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
@@ -5369,16 +5361,6 @@ class LibvirtDriver(driver.ComputeDriver):
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
- def _get_neutron_events_for_live_migration(self, network_info):
- # Neutron should send events to Nova indicating that the VIFs
- # are successfully plugged on destination host.
-
- # TODO(sahid): Currently we only use the mechanism of waiting
- # for neutron events during live-migration for linux-bridge.
- return [('network-vif-plugged', vif['id'])
- for vif in network_info if (
- vif.get('type') == network_model.VIF_TYPE_BRIDGE)]
-
def _cleanup_failed_start(self, context, instance, network_info,
block_device_info, guest, destroy_disks):
try:
@@ -6462,7 +6444,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, guest,
- device_names, bandwidth):
+ device_names):
"""Invoke the live migration operation
:param context: security context
@@ -6475,7 +6457,6 @@ class LibvirtDriver(driver.ComputeDriver):
:param guest: the guest domain object
:param device_names: list of device names that are being migrated with
instance
- :param bandwidth: MiB/s of bandwidth allowed for the migration at start
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
@@ -6549,7 +6530,7 @@ class LibvirtDriver(driver.ComputeDriver):
flags=migration_flags,
params=params,
domain_xml=new_xml_str,
- bandwidth=bandwidth)
+ bandwidth=CONF.libvirt.live_migration_bandwidth)
for hostname, port in serial_ports:
serial_console.release_port(host=hostname, port=port)
@@ -6891,58 +6872,11 @@ class LibvirtDriver(driver.ComputeDriver):
disk_paths, device_names = self._live_migration_copy_disk_paths(
context, instance, guest)
- deadline = CONF.vif_plugging_timeout
- if utils.is_neutron() and deadline:
- # We don't generate events if CONF.vif_plugging_timeout=0
- # meaning that the operator disabled using them.
-
- # In case of Linux Bridge, the agent is waiting for new
- # TAP devices on destination node. They are going to be
- # created by libvirt at the very beginning of the
- # live-migration process. Then receiving the events from
- # Neutron will ensure that everything is configured
- # correctly.
- events = self._get_neutron_events_for_live_migration(
- instance.get_network_info())
- else:
- # TODO(sahid): This 'is_neutron()' condition should be
- # removed when nova-network will be erased from the tree
- # (Rocky).
- events = []
-
- if events:
- # We start migration with the minimum bandwidth
- # speed. Depending on the VIF type (see:
- # _get_neutron_events_for_live_migration) we will wait for
- # Neutron to send events that confirm network is setup or
- # directly configure QEMU to use the maximun BW allowed.
- bandwidth = MIN_MIGRATION_SPEED_BW
- else:
- bandwidth = CONF.libvirt.live_migration_bandwidth
-
- try:
- error_cb = self._neutron_failed_live_migration_callback
- with self.virtapi.wait_for_instance_event(instance, events,
- deadline=deadline,
- error_callback=error_cb):
- opthread = utils.spawn(self._live_migration_operation,
- context, instance, dest,
- block_migration,
- migrate_data, guest,
- device_names, bandwidth)
- except eventlet.timeout.Timeout:
- msg = ('Timeout waiting for VIF plugging events, '
- 'canceling migration')
- raise exception.MigrationError(reason=msg)
- else:
- if utils.is_neutron() and events:
- LOG.debug('VIF events received, continuing migration '
- 'with max bandwidth configured: %d',
- CONF.libvirt.live_migration_bandwidth,
- instance=instance)
- # Configure QEMU to use the maximum bandwidth allowed.
- guest.migrate_configure_max_speed(
- CONF.libvirt.live_migration_bandwidth)
+ opthread = utils.spawn(self._live_migration_operation,
+ context, instance, dest,
+ block_migration,
+ migrate_data, guest,
+ device_names)
finish_event = eventlet.event.Event()
self.active_migrations[instance.uuid] = deque()
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 34aaa6f413..88bde0c734 100755
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -684,6 +684,13 @@ class Guest(object):
"""
self._domain.migrateSetMaxDowntime(mstime)
+ def migrate_configure_max_speed(self, bandwidth):
+ """The maximum bandwidth that will be used to do migration
+
+ :param bw: Bandwidth in MiB/s
+ """
+ self._domain.migrateSetMaxSpeed(bandwidth)
+
def migrate_start_postcopy(self):
"""Switch running live migration to post-copy mode"""
self._domain.migrateStartPostCopy()
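
Aside: migrate_configure_max_speed() is a thin wrapper over libvirt's virDomainMigrateSetMaxSpeed, which adjusts the bandwidth cap of a migration that is already running. With the throttling logic reverted in driver.py the driver no longer calls it, but the helper, its fakelibvirt stub and the unit test above remain part of the Guest API. A minimal usage sketch against the libvirt Python binding (connection URI, domain name and the 100 MiB/s figure are arbitrary):

    import libvirt

    conn = libvirt.open('qemu:///system')
    dom = conn.lookupByName('instance-00000001')   # hypothetical domain

    # Cap the live-migration bandwidth at 100 MiB/s while the migration job
    # is in flight; libvirt applies the new limit immediately.
    dom.migrateSetMaxSpeed(100)
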
diff --git a/releasenotes/notes/bug-1414559-880d6b3c1ce3b95e.yaml b/releasenotes/notes/bug-1414559-880d6b3c1ce3b95e.yaml
deleted file mode 100644
index 0a760d4093..0000000000
--- a/releasenotes/notes/bug-1414559-880d6b3c1ce3b95e.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-upgrade:
- - |
- Live migrations are now initially slowed to ensure Neutron is given
- adequate time to wire up the VIFs on the destination. Once complete Neutron
- will send an event to Nova returning the migration to full speed. This
- requires Neutron >=11.0.4 on Pike when used with LinuxBridge VIFs in order
- to pick up the Icb039ae2d465e3822ab07ae4f9bc405c1362afba bugfix.