-rw-r--r--  doc/source/filter_scheduler.rst                                  |    8
-rw-r--r--  nova/compute/api.py                                              |   59
-rw-r--r--  nova/compute/manager.py                                          |   25
-rw-r--r--  nova/conductor/manager.py                                        |   25
-rw-r--r--  nova/db/sqlalchemy/api.py                                        |    5
-rw-r--r--  nova/network/model.py                                            |    7
-rw-r--r--  nova/network/neutronv2/api.py                                    |    2
-rw-r--r--  nova/objects/instance.py                                         |    6
-rw-r--r--  nova/scheduler/filters/isolated_hosts_filter.py                  |    5
-rw-r--r--  nova/tests/functional/api/client.py                              |    9
-rw-r--r--  nova/tests/functional/regressions/test_bug_1713783.py           |  130
-rw-r--r--  nova/tests/functional/regressions/test_bug_1746483.py           |  106
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py                      |   28
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py                      |  241
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py                      |   49
-rw-r--r--  nova/tests/unit/db/test_db_api.py                                |   22
-rw-r--r--  nova/tests/unit/network/test_network_info.py                     |    5
-rw-r--r--  nova/tests/unit/network/test_neutronv2.py                        |    3
-rw-r--r--  nova/tests/unit/objects/test_instance.py                         |   14
-rwxr-xr-x  nova/tests/unit/virt/libvirt/test_driver.py                      |   38
-rw-r--r--  nova/tests/unit/virt/libvirt/test_migration.py                   |   67
-rw-r--r--  nova/tests/unit/virt/test_block_device.py                        |   25
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py                        |    2
-rw-r--r--  nova/virt/block_device.py                                        |   10
-rw-r--r--  nova/virt/driver.py                                              |    3
-rw-r--r--  nova/virt/fake.py                                                |    2
-rw-r--r--  nova/virt/libvirt/driver.py                                      |   22
-rw-r--r--  nova/virt/libvirt/migration.py                                   |   15
-rw-r--r--  releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml |    9
29 files changed, 749 insertions, 193 deletions
diff --git a/doc/source/filter_scheduler.rst b/doc/source/filter_scheduler.rst
index daf47539fe..4e5683b39c 100644
--- a/doc/source/filter_scheduler.rst
+++ b/doc/source/filter_scheduler.rst
@@ -101,7 +101,7 @@ There are many standard filter classes which may be used
fall back to the global default ``cpu_allocation_ratio``. If more than one value
is found for a host (meaning the host is in two different aggregates with
different ratio settings), the minimum value will be used.
-* |IsolatedHostsFilter| - filter based on ``image_isolated``, ``host_isolated``
+* |IsolatedHostsFilter| - filter based on ``isolated_images``, ``isolated_hosts``
and ``restrict_isolated_hosts_to_isolated_images`` flags.
* |JsonFilter| - allows simple JSON-based grammar for selecting hosts.
* |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM
@@ -239,9 +239,9 @@ enabled and operational.
Now we are going to |IsolatedHostsFilter|. There can be some special hosts
reserved for specific images. These hosts are called **isolated**. So the
images to run on the isolated hosts are also called isolated. The filter
-checks if ``image_isolated`` flag named in instance specifications is the same
-as the host. Isolated hosts can run non isolated images if the flag
-``restrict_isolated_hosts_to_isolated_images`` is set to false.
+checks if ``isolated_images`` flag named in instance specifications is the same
+as the host specified in ``isolated_hosts``. Isolated hosts can run non-isolated
+images if the flag ``restrict_isolated_hosts_to_isolated_images`` is set to false.
|DifferentHostFilter| - method ``host_passes`` returns ``True`` if the host to
place an instance on is different from all the hosts used by a set of instances.
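As an illustration only (not part of this change), the options this filter reads live in the ``[filter_scheduler]`` group of nova.conf, as the functional test added further down also shows; a deployment restricting a single image to a single host might set:

    [filter_scheduler]
    enabled_filters = ...,IsolatedHostsFilter
    isolated_images = <glance image UUID>
    isolated_hosts = host1
    restrict_isolated_hosts_to_isolated_images = True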
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b3be3669cc..3038948143 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2894,6 +2894,8 @@ class API(base.Base):
quiesced = False
if instance.vm_state == vm_states.ACTIVE:
try:
+ LOG.info(_LI("Attempting to quiesce instance before volume "
+ "snapshot."), instance=instance)
self.compute_rpcapi.quiesce_instance(context, instance)
quiesced = True
except (exception.InstanceQuiesceNotSupported,
@@ -2911,28 +2913,43 @@ class API(base.Base):
context, instance.uuid)
mapping = []
- for bdm in bdms:
- if bdm.no_device:
- continue
-
- if bdm.is_volume:
- # create snapshot based on volume_id
- volume = self.volume_api.get(context, bdm.volume_id)
- # NOTE(yamahata): Should we wait for snapshot creation?
- # Linux LVM snapshot creation completes in
- # short time, it doesn't matter for now.
- name = _('snapshot for %s') % image_meta['name']
- LOG.debug('Creating snapshot from volume %s.', volume['id'],
- instance=instance)
- snapshot = self.volume_api.create_snapshot_force(
- context, volume['id'], name, volume['display_description'])
- mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
- bdm)
- mapping_dict = mapping_dict.get_image_mapping()
- else:
- mapping_dict = bdm.get_image_mapping()
+ try:
+ for bdm in bdms:
+ if bdm.no_device:
+ continue
- mapping.append(mapping_dict)
+ if bdm.is_volume:
+ # create snapshot based on volume_id
+ volume = self.volume_api.get(context, bdm.volume_id)
+ # NOTE(yamahata): Should we wait for snapshot creation?
+ # Linux LVM snapshot creation completes in short time,
+ # it doesn't matter for now.
+ name = _('snapshot for %s') % image_meta['name']
+ LOG.debug('Creating snapshot from volume %s.',
+ volume['id'], instance=instance)
+ snapshot = self.volume_api.create_snapshot_force(
+ context, volume['id'],
+ name, volume['display_description'])
+ mapping_dict = block_device.snapshot_from_bdm(
+ snapshot['id'], bdm)
+ mapping_dict = mapping_dict.get_image_mapping()
+ else:
+ mapping_dict = bdm.get_image_mapping()
+
+ mapping.append(mapping_dict)
+ # NOTE(tasker): No error handling is done in the above for loop.
+ # This means that if the snapshot fails and throws an exception
+ # the traceback will skip right over the unquiesce needed below.
+ # Here, catch any exception, unquiesce the instance, and raise the
+ # error so that the calling function can do what it needs to in
+ # order to properly treat a failed snap.
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if quiesced:
+ LOG.info(_LI("Unquiescing instance after volume snapshot "
+ "failure."), instance=instance)
+ self.compute_rpcapi.unquiesce_instance(
+ context, instance, mapping)
if quiesced:
self.compute_rpcapi.unquiesce_instance(context, instance, mapping)
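Distilling the hunk above into a minimal, self-contained sketch of the cleanup pattern it introduces (the callables are hypothetical; only oslo.utils' ``save_and_reraise_exception`` is a real API):

    from oslo_utils import excutils

    def snapshot_volumes(create_snapshots, unquiesce, quiesced):
        """Create volume snapshots, undoing a quiesce if anything fails."""
        try:
            create_snapshots()
        except Exception:
            # Run the cleanup, then re-raise the original snapshot error so
            # the caller can still treat the operation as failed.
            with excutils.save_and_reraise_exception():
                if quiesced:
                    unquiesce()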
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2e83f07ae6..1f6bbd4039 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1796,6 +1796,8 @@ class ComputeManager(manager.Manager):
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
+ self._cleanup_volumes(context, instance.uuid,
+ block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info(),
fault_message=e.kwargs['reason'])
@@ -2722,23 +2724,14 @@ class ComputeManager(manager.Manager):
LOG.info(_LI("Rebuilding instance"), instance=instance)
- # NOTE(gyee): there are three possible scenarios.
- #
- # 1. instance is being rebuilt on the same node. In this case,
- # recreate should be False and scheduled_node should be None.
- # 2. instance is being rebuilt on a node chosen by the
- # scheduler (i.e. evacuate). In this case, scheduled_node should
- # be specified and recreate should be True.
- # 3. instance is being rebuilt on a node chosen by the user. (i.e.
- # force evacuate). In this case, scheduled_node is not specified
- # and recreate is set to True.
- #
- # For scenarios #2 and #3, we must do rebuild claim as server is
- # being evacuated to a different node.
- if recreate or scheduled_node is not None:
+ if recreate:
+ # This is an evacuation to a new host, so we need to perform a
+ # resource claim.
rt = self._get_resource_tracker()
rebuild_claim = rt.rebuild_claim
else:
+ # This is a rebuild to the same host, so we don't need to make
+ # a claim since the instance is already on this host.
rebuild_claim = claims.NopClaim
image_meta = {}
@@ -5015,8 +5008,8 @@ class ComputeManager(manager.Manager):
"old: %(old_cinfo)s",
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
instance=instance)
- self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
- resize_to)
+ self.driver.swap_volume(context, old_cinfo, new_cinfo, instance,
+ mountpoint, resize_to)
LOG.debug("swap_volume: Driver volume swap returned, new "
"connection_info is now : %(new_cinfo)s",
{'new_cinfo': new_cinfo})
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 5c5cf49a4a..318cb0ec81 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -528,17 +528,25 @@ class ComputeTaskManager(base.Base):
hosts = self._schedule_instances(
context, request_spec, filter_properties)
except Exception as exc:
+ num_attempts = filter_properties.get(
+ 'retry', {}).get('num_attempts', 1)
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
- try:
- # If the BuildRequest stays around then instance show/lists
- # will pull from it rather than the errored instance.
- self._destroy_build_request(context, instance)
- except exception.BuildRequestNotFound:
- pass
+ # If num_attempts > 1, we're in a reschedule and probably
+ # either hit NoValidHost or MaxRetriesExceeded. Either way,
+ # the build request should already be gone and we probably
+ # can't reach the API DB from the cell conductor.
+ if num_attempts <= 1:
+ try:
+ # If the BuildRequest stays around then instance
+ # show/lists will pull from it rather than the errored
+ # instance.
+ self._destroy_build_request(context, instance)
+ except exception.BuildRequestNotFound:
+ pass
self._cleanup_allocated_networks(
context, instance, requested_networks)
return
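To make the ``num_attempts`` check above concrete, a sketch of the retry info carried in ``filter_properties`` (an assumption based on how ``scheduler_utils.populate_retry`` builds it; keys other than ``num_attempts`` may differ):

    # First pass through build_instances: not a reschedule, so the
    # BuildRequest cleanup above still runs.
    filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}

    # On a reschedule the counter has been bumped past 1, so the cleanup
    # is skipped.
    filter_properties = {'retry': {'num_attempts': 2,
                                   'hosts': [['host1', 'node1']]}}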
@@ -704,8 +712,11 @@ class ComputeTaskManager(base.Base):
# RequestSpec object - probably because the instance is old
# We need to mock that the old way
filter_properties = {'ignore_hosts': [instance.host]}
+ # build_request_spec expects a primitive image dict
+ image_meta = nova_object.obj_to_primitive(
+ instance.image_meta)
request_spec = scheduler_utils.build_request_spec(
- context, image_ref, [instance])
+ context, image_meta, [instance])
elif recreate:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 1fa9805c1a..8e2f9c1213 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2582,9 +2582,10 @@ def _instance_get_all_query(context, project_only=False, joins=None):
@pick_context_manager_reader_allow_async
def instance_get_all_by_host(context, host, columns_to_join=None):
+ query = _instance_get_all_query(context, joins=columns_to_join)
return _instances_fill_metadata(context,
- _instance_get_all_query(context).filter_by(host=host).all(),
- manual_joins=columns_to_join)
+ query.filter_by(host=host).all(),
+ manual_joins=columns_to_join)
def _instance_get_all_uuids_by_host(context, host):
diff --git a/nova/network/model.py b/nova/network/model.py
index e591502ebc..f400f2cc15 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -403,8 +403,11 @@ class VIF(Model):
return not self.__eq__(other)
def fixed_ips(self):
- return [fixed_ip for subnet in self['network']['subnets']
- for fixed_ip in subnet['ips']]
+ if self['network']:
+ return [fixed_ip for subnet in self['network']['subnets']
+ for fixed_ip in subnet['ips']]
+ else:
+ return []
def floating_ips(self):
return [floating_ip for fixed_ip in self.fixed_ips()
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 7b1c019fda..e41415f02b 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -127,7 +127,7 @@ class ClientWrapper(clientv20.Client):
"admin credential located in nova.conf"))
raise exception.NeutronAdminCredentialConfigurationInvalid()
except neutron_client_exc.Forbidden as e:
- raise exception.Forbidden(e)
+ raise exception.Forbidden(six.text_type(e))
return ret
return wrapper
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index a50f102e15..37b361870f 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -997,6 +997,12 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
context will be saved which can cause incorrect resource tracking, and
should be avoided.
"""
+ # First check to see if we even have a migration context set and if not
+ # we can exit early without lazy-loading other attributes.
+ if 'migration_context' in self and self.migration_context is None:
+ yield
+ return
+
current_values = {}
for attr_name in _MIGRATION_CONTEXT_ATTRS:
current_values[attr_name] = getattr(self, attr_name)
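A short usage sketch of the context manager this early return short-circuits, assuming an already-loaded Instance object ``instance`` (the resource-tracking call is a hypothetical stand-in):

    # With migration_context known to be None, the new early return yields
    # immediately without lazy-loading the migration-context attributes
    # from the database.
    instance.migration_context = None
    with instance.mutated_migration_context():
        track_resources(instance)  # sees the instance's own resource claims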
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index 86ab1c4ce9..3402ecd49a 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -59,7 +59,10 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
return ((not restrict_isolated_hosts_to_isolated_images) or
(host_state.host not in isolated_hosts))
- image_ref = spec_obj.image.id if spec_obj.image else None
+ # Check to see if the image id is set since volume-backed instances
+ # can be created without an imageRef in the server create request.
+ image_ref = spec_obj.image.id \
+ if spec_obj.image and 'id' in spec_obj.image else None
image_isolated = image_ref in isolated_images
host_isolated = host_state.host in isolated_hosts
diff --git a/nova/tests/functional/api/client.py b/nova/tests/functional/api/client.py
index c1a3a017ae..e328eeb863 100644
--- a/nova/tests/functional/api/client.py
+++ b/nova/tests/functional/api/client.py
@@ -427,3 +427,12 @@ class TestOpenStackClient(object):
"""
return self.api_put('/servers/%s/tags' % server_id,
{'tags': tags}).body['tags']
+
+ def get_migrations(self):
+ return self.api_get('os-migrations').body['migrations']
+
+ def forced_down_service(self, binary, host, force_down=True):
+ return self.api_put('os-services/force-down',
+ {"host": host,
+ "binary": binary,
+ "forced_down": force_down}).body['service']
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
new file mode 100644
index 0000000000..356c1cc1c2
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -0,0 +1,130 @@
+# Copyright 2017 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+from oslo_log import log as logging
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import integrated_helpers
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+import nova.tests.unit.image.fake
+from nova.tests.unit import policy_fixture
+
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+class FailedEvacuateStateTests(test.TestCase,
+ integrated_helpers.InstanceHelperMixin):
+ """Regression Tests for bug #1713783
+
+ When evacuation fails with NoValidHost, the migration status remains
+ 'accepted' instead of 'error'. This causes problem in case the compute
+ service starts up again and looks for migrations with status 'accepted',
+ as it then removes the local instances for those migrations even though
+ the instance never actually migrated to another host.
+ """
+
+ microversion = 'latest'
+
+ def setUp(self):
+ super(FailedEvacuateStateTests, self).setUp()
+
+ self.useFixture(policy_fixture.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.PlacementFixture())
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+ self.api.microversion = self.microversion
+
+ nova.tests.unit.image.fake.stub_out_image_service(self)
+
+ self.start_service('conductor')
+
+ enabled_filters = CONF.filter_scheduler.enabled_filters
+ # Remove the DiskFilter since we're using Placement for filtering on
+ # DISK_GB.
+ if 'DiskFilter' in enabled_filters:
+ enabled_filters.remove('DiskFilter')
+ self.start_service('scheduler')
+
+ self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
+
+ self.hostname = 'host1'
+ self.compute1 = self.start_service('compute', host=self.hostname)
+ fake_network.set_stub_network_methods(self)
+
+ flavors = self.api.get_flavors()
+ self.flavor1 = flavors[0]
+
+ def _wait_for_notification_event_type(self, event_type, max_retries=10):
+ retry_counter = 0
+ while True:
+ if len(fake_notifier.NOTIFICATIONS) > 0:
+ for notification in fake_notifier.NOTIFICATIONS:
+ if notification.event_type == event_type:
+ return
+ if retry_counter == max_retries:
+ self.fail('Wait for notification event type (%s) failed'
+ % event_type)
+ retry_counter += 1
+ time.sleep(0.5)
+
+ def _boot_a_server(self):
+ server_req = self._build_minimal_create_server_request(
+ self.api, 'some-server', flavor_id=self.flavor1['id'],
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ networks='none')
+ LOG.info('booting on %s', self.hostname)
+ created_server = self.api.post_server({'server': server_req})
+ return self._wait_for_state_change(
+ self.api, created_server, 'ACTIVE')
+
+ def test_evacuate_no_valid_host(self):
+ # Boot a server
+ server = self._boot_a_server()
+
+ # Force source compute down
+ self.api.forced_down_service(host=self.hostname, binary='nova-compute')
+ self.compute1.stop()
+
+ fake_notifier.stub_notifier(self)
+ fake_notifier.reset()
+
+ # Initiate evacuation
+ post = {'evacuate': {}}
+ self.api.post_server_action(server['id'], post)
+
+ self._wait_for_notification_event_type('compute_task.rebuild_server')
+
+ server = self._wait_for_state_change(self.api, server, 'ERROR')
+ self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])
+
+ # Check migrations
+ migrations = self.api.get_migrations()
+ self.assertEqual(1, len(migrations))
+ self.assertEqual('evacuation', migrations[0]['migration_type'])
+ self.assertEqual(server['id'], migrations[0]['instance_uuid'])
+ self.assertEqual(self.hostname, migrations[0]['source_compute'])
+ self.assertEqual('accepted', migrations[0]['status'])
+ # NOTE(elod.illes): Migration status should be 'error' and not
+ # 'accepted'. Needs to be replaced when bug #1713783 is fixed.
+ # self.assertEqual('error', migrations[0]['status'])
diff --git a/nova/tests/functional/regressions/test_bug_1746483.py b/nova/tests/functional/regressions/test_bug_1746483.py
new file mode 100644
index 0000000000..0576c8695a
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1746483.py
@@ -0,0 +1,106 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nova import config
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import integrated_helpers
+from nova.tests.unit.image import fake as image_fakes
+from nova.tests.unit import policy_fixture
+from nova import utils
+from nova.virt import fake
+
+CONF = config.CONF
+
+
+class TestBootFromVolumeIsolatedHostsFilter(
+ test.TestCase, integrated_helpers.InstanceHelperMixin):
+ """Regression test for bug #1746483
+
+ The IsolatedHostsFilter checks for images restricted to certain hosts via
+ config options. When creating a server from a root volume, the image is
+ in the volume (and its related metadata from Cinder). When creating a
+ volume-backed server, the imageRef is not required.
+
+ The regression is that the RequestSpec.image.id field is not set and the
+ IsolatedHostsFilter blows up trying to load the image id.
+ """
+ def setUp(self):
+ super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()
+
+ self.useFixture(policy_fixture.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.CinderFixture(self))
+ self.useFixture(nova_fixtures.PlacementFixture())
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+
+ image_fakes.stub_out_image_service(self)
+ self.addCleanup(image_fakes.FakeImageService_reset)
+
+ self.start_service('conductor')
+
+ # Add the IsolatedHostsFilter to the list of enabled filters since it
+ # is not enabled by default.
+ enabled_filters = CONF.filter_scheduler.enabled_filters
+ enabled_filters.append('IsolatedHostsFilter')
+ # Remove the DiskFilter since we're using Placement for filtering on
+ # DISK_GB.
+ if 'DiskFilter' in enabled_filters:
+ enabled_filters.remove('DiskFilter')
+ self.flags(
+ enabled_filters=enabled_filters,
+ isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
+ isolated_hosts=['host1'],
+ restrict_isolated_hosts_to_isolated_images=True,
+ group='filter_scheduler')
+ self.start_service('scheduler')
+
+ # Create two compute nodes/services so we can restrict the image
+ # we'll use to one of the hosts.
+ for host in ('host1', 'host2'):
+ fake.set_nodes([host])
+ self.addCleanup(fake.restore_nodes)
+ self.start_service('compute', host=host)
+
+ def test_boot_from_volume_with_isolated_image(self):
+ # Create our server without networking just to keep things simple.
+ image_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ server_req_body = {
+ # There is no imageRef because this is boot from volume.
+ 'server': {
+ 'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
+ 'name': 'test_boot_from_volume_with_isolated_image',
+ 'networks': 'none',
+ 'block_device_mapping_v2': [{
+ 'boot_index': 0,
+ 'uuid': image_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }]
+ }
+ }
+ # Note that we're using v2.1 by default but need v2.37 to use
+ # networks='none'.
+ with utils.temporary_mutation(self.api, microversion='2.37'):
+ server = self.api.post_server(server_req_body)
+ server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+ # NOTE(mriedem): The instance is successfully scheduled but since
+ # the image_id from the volume_image_metadata isn't stored in the
+ # RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images
+ # is True, the isolated host (host1) is filtered out because the
+ # filter doesn't have enough information to know if the image within
+ # the volume can be used on that host.
+ self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index 56c80eed04..10003de6ff 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -2755,7 +2755,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance)
def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
- vm_state=vm_states.ACTIVE):
+ vm_state=vm_states.ACTIVE,
+ snapshot_fails=False):
fake_sys_meta = {'image_min_ram': '11',
'image_min_disk': '22',
'image_container_format': 'ami',
@@ -2801,6 +2802,8 @@ class _ComputeAPIUnitTestMixIn(object):
return {'id': volume_id, 'display_description': ''}
def fake_volume_create_snapshot(context, volume_id, name, description):
+ if snapshot_fails:
+ raise exception.OverQuota(overs="snapshots")
return {'id': '%s-snapshot' % volume_id}
def fake_quiesce_instance(context, instance):
@@ -2850,8 +2853,13 @@ class _ComputeAPIUnitTestMixIn(object):
'tag': None})
# All the db_only fields and the volume ones are removed
- self.compute_api.snapshot_volume_backed(
- self.context, instance, 'test-snapshot')
+ if snapshot_fails:
+ self.assertRaises(exception.OverQuota,
+ self.compute_api.snapshot_volume_backed,
+ self.context, instance, "test-snapshot")
+ else:
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
@@ -2889,8 +2897,13 @@ class _ComputeAPIUnitTestMixIn(object):
quiesced = [False, False]
# Check that the mappings from the image properties are not included
- self.compute_api.snapshot_volume_backed(
- self.context, instance, 'test-snapshot')
+ if snapshot_fails:
+ self.assertRaises(exception.OverQuota,
+ self.compute_api.snapshot_volume_backed,
+ self.context, instance, "test-snapshot")
+ else:
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
@@ -2901,6 +2914,11 @@ class _ComputeAPIUnitTestMixIn(object):
def test_snapshot_volume_backed_with_quiesce(self):
self._test_snapshot_volume_backed(True, False)
+ def test_snapshot_volume_backed_with_quiesce_create_snap_fails(self):
+ self._test_snapshot_volume_backed(quiesce_required=True,
+ quiesce_fails=False,
+ snapshot_fails=True)
+
def test_snapshot_volume_backed_with_quiesce_skipped(self):
self._test_snapshot_volume_backed(False, True)
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 394a1a4620..f37e14925b 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -65,6 +65,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.volume import cinder
CONF = nova.conf.CONF
@@ -1763,162 +1764,159 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
do_test()
+ @mock.patch.object(virt_driver.ComputeDriver, 'get_volume_connector',
+ return_value={})
+ @mock.patch.object(manager.ComputeManager, '_instance_update',
+ return_value={})
+ @mock.patch.object(db, 'instance_fault_create')
+ @mock.patch.object(db, 'block_device_mapping_update')
+ @mock.patch.object(db,
+ 'block_device_mapping_get_by_instance_and_volume_id')
+ @mock.patch.object(cinder.API, 'migrate_volume_completion')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(cinder.API, 'unreserve_volume')
+ @mock.patch.object(cinder.API, 'get')
+ @mock.patch.object(cinder.API, 'roll_detaching')
@mock.patch.object(compute_utils, 'notify_about_volume_swap')
- def test_swap_volume_volume_api_usage(self, mock_notify):
+ def _test_swap_volume(self, mock_notify, mock_roll_detaching,
+ mock_cinder_get, mock_unreserve_volume,
+ mock_terminate_connection,
+ mock_migrate_volume_completion,
+ mock_bdm_get, mock_bdm_update,
+ mock_instance_fault_create,
+ mock_instance_update,
+ mock_get_volume_connector,
+ expected_exception=None):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
volumes = {}
- old_volume_id = uuids.fake
- volumes[old_volume_id] = {'id': old_volume_id,
+ volumes[uuids.old_volume] = {'id': uuids.old_volume,
'display_name': 'old_volume',
'status': 'detaching',
'size': 1}
- new_volume_id = uuids.fake_2
- volumes[new_volume_id] = {'id': new_volume_id,
+ volumes[uuids.new_volume] = {'id': uuids.new_volume,
'display_name': 'new_volume',
'status': 'available',
'size': 2}
- def fake_vol_api_roll_detaching(cls, context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'detaching':
- volumes[volume_id]['status'] = 'in-use'
-
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vdb', 'source_type': 'volume',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'connection_info': '{"foo": "bar"}'})
- def fake_vol_api_func(cls, context, volume, *args):
+ def fake_vol_api_roll_detaching(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'detaching':
+ volumes[volume_id]['status'] = 'in-use'
+
+ def fake_vol_api_func(context, volume, *args):
self.assertTrue(uuidutils.is_uuid_like(volume))
return {}
- def fake_vol_get(cls, context, volume_id):
+ def fake_vol_get(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
return volumes[volume_id]
- def fake_vol_unreserve(cls, context, volume_id):
+ def fake_vol_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
- def fake_vol_migrate_volume_completion(cls, context, old_volume_id,
+ def fake_vol_migrate_volume_completion(context, old_volume_id,
new_volume_id, error=False):
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
volumes[old_volume_id]['status'] = 'in-use'
return {'save_volume_id': new_volume_id}
- def fake_func_exc(*args, **kwargs):
- raise AttributeError # Random exception
-
- def fake_swap_volume(cls, old_connection_info, new_connection_info,
- instance, mountpoint, resize_to):
- self.assertEqual(resize_to, 2)
-
def fake_block_device_mapping_update(ctxt, id, updates, legacy):
self.assertEqual(2, updates['volume_size'])
return fake_bdm
- self.stub_out('nova.volume.cinder.API.roll_detaching',
- fake_vol_api_roll_detaching)
- self.stub_out('nova.volume.cinder.API.get', fake_vol_get)
- self.stub_out('nova.volume.cinder.API.initialize_connection',
- fake_vol_api_func)
- self.stub_out('nova.volume.cinder.API.unreserve_volume',
- fake_vol_unreserve)
- self.stub_out('nova.volume.cinder.API.terminate_connection',
- fake_vol_api_func)
- self.stub_out('nova.db.'
- 'block_device_mapping_get_by_instance_and_volume_id',
- lambda x, y, z, v: fake_bdm)
- self.stub_out('nova.virt.driver.ComputeDriver.get_volume_connector',
- lambda x: {})
- self.stub_out('nova.virt.driver.ComputeDriver.swap_volume',
- fake_swap_volume)
- self.stub_out('nova.volume.cinder.API.migrate_volume_completion',
- fake_vol_migrate_volume_completion)
- self.stub_out('nova.db.block_device_mapping_update',
- fake_block_device_mapping_update)
- self.stub_out('nova.db.instance_fault_create',
- lambda x, y:
- test_instance_fault.fake_faults['fake-uuid'][0])
- self.stub_out('nova.compute.manager.ComputeManager.'
- '_instance_update', lambda c, u, **k: {})
-
- # Good path
+ mock_roll_detaching.side_effect = fake_vol_api_roll_detaching
+ mock_terminate_connection.side_effect = fake_vol_api_func
+ mock_cinder_get.side_effect = fake_vol_get
+ mock_migrate_volume_completion.side_effect = (
+ fake_vol_migrate_volume_completion)
+ mock_unreserve_volume.side_effect = fake_vol_unreserve
+ mock_bdm_get.return_value = fake_bdm
+ mock_bdm_update.side_effect = fake_block_device_mapping_update
+ mock_instance_fault_create.return_value = (
+ test_instance_fault.fake_faults['fake-uuid'][0])
+
instance1 = fake_instance.fake_instance_obj(
self.context, **{'uuid': uuids.instance})
- self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
- instance1)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(test.MatchType(context.RequestContext),
- instance1, self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(test.MatchType(context.RequestContext),
- instance1, self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.END,
- old_volume_id, new_volume_id)
-
- # Error paths
- mock_notify.reset_mock()
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stub_out('nova.virt.fake.FakeDriver.swap_volume',
- fake_func_exc)
- instance2 = fake_instance.fake_instance_obj(
- self.context, **{'uuid': uuids.instance})
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- instance2)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance2,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance2,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.ERROR,
- old_volume_id, new_volume_id,
- test.MatchType(AttributeError))
-
- mock_notify.reset_mock()
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stub_out('nova.volume.cinder.API.initialize_connection',
- fake_func_exc)
- instance3 = fake_instance.fake_instance_obj(
- self.context, **{'uuid': uuids.instance})
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- instance3)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance3,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance3,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.ERROR,
- old_volume_id, new_volume_id,
- test.MatchType(AttributeError))
+
+ if expected_exception:
+ volumes[uuids.old_volume]['status'] = 'detaching'
+ volumes[uuids.new_volume]['status'] = 'attaching'
+ self.assertRaises(expected_exception, self.compute.swap_volume,
+ self.context, uuids.old_volume, uuids.new_volume,
+ instance1)
+ self.assertEqual('in-use', volumes[uuids.old_volume]['status'])
+ self.assertEqual('available', volumes[uuids.new_volume]['status'])
+ self.assertEqual(2, mock_notify.call_count)
+ mock_notify.assert_any_call(
+ test.MatchType(context.RequestContext), instance1,
+ self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.START,
+ uuids.old_volume, uuids.new_volume)
+ mock_notify.assert_any_call(
+ test.MatchType(context.RequestContext), instance1,
+ self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.ERROR,
+ uuids.old_volume, uuids.new_volume,
+ test.MatchType(expected_exception))
+ else:
+ self.compute.swap_volume(self.context, uuids.old_volume,
+ uuids.new_volume, instance1)
+ self.assertEqual(volumes[uuids.old_volume]['status'], 'in-use')
+ self.assertEqual(2, mock_notify.call_count)
+ mock_notify.assert_any_call(test.MatchType(context.RequestContext),
+ instance1, self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.START,
+ uuids.old_volume, uuids.new_volume)
+ mock_notify.assert_any_call(test.MatchType(context.RequestContext),
+ instance1, self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.END,
+ uuids.old_volume, uuids.new_volume)
+
+ def _assert_volume_api(self, context, volume, *args):
+ self.assertTrue(uuidutils.is_uuid_like(volume))
+ return {}
+
+ def _assert_swap_volume(self, context, old_connection_info,
+ new_connection_info, instance, mountpoint,
+ resize_to):
+ self.assertEqual(2, resize_to)
+
+ @mock.patch.object(cinder.API, 'initialize_connection')
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume')
+ def test_swap_volume_volume_api_usage(self, mock_swap_volume,
+ mock_initialize_connection):
+ mock_initialize_connection.side_effect = self._assert_volume_api
+ mock_swap_volume.side_effect = self._assert_swap_volume
+ self._test_swap_volume()
+
+ @mock.patch.object(cinder.API, 'initialize_connection')
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume',
+ side_effect=test.TestingException())
+ def test_swap_volume_with_compute_driver_exception(
+ self, mock_swap_volume, mock_initialize_connection):
+ mock_initialize_connection.side_effect = self._assert_volume_api
+ self._test_swap_volume(expected_exception=test.TestingException)
+
+ @mock.patch.object(cinder.API, 'initialize_connection',
+ side_effect=test.TestingException())
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume')
+ def test_swap_volume_with_initialize_connection_exception(
+ self, mock_swap_volume, mock_initialize_connection):
+ self._test_swap_volume(expected_exception=test.TestingException)
@mock.patch('nova.compute.utils.notify_about_volume_swap')
@mock.patch('nova.db.block_device_mapping_get_by_instance_and_volume_id')
@@ -3022,7 +3020,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
ex = exception.InstanceNotFound(instance_id=instance.uuid)
self._test_rebuild_ex(instance, ex)
- def test_rebuild_node_not_updated_if_not_recreate(self):
+ # A rebuild to the same host should never attempt a rebuild claim.
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.rebuild_claim',
+ new_callable=mock.NonCallableMock)
+ def test_rebuild_node_not_updated_if_not_recreate(self,
+ mock_rebuild_claim):
node = uuidutils.generate_uuid() # ironic node uuid
instance = fake_instance.fake_instance_obj(self.context, node=node)
instance.migration_context = None
@@ -3776,6 +3778,9 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.block_device_mapping, self.node, self.limits, {})
mock_clean_net.assert_called_once_with(self.context, self.instance,
self.requested_networks)
+ mock_clean_vol.assert_called_once_with(self.context,
+ self.instance.uuid, self.block_device_mapping,
+ raise_exc=False)
mock_add.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY, fault_message=mock.ANY)
mock_nil.assert_called_once_with(self.instance)
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index e4882565d3..66105ba019 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -1239,6 +1239,9 @@ class _BaseTaskTestCase(object):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
+ bs_mock.assert_called_once_with(
+ self.context, obj_base.obj_to_primitive(inst_obj.image_meta),
+ [inst_obj])
fp_mock.assert_called_once_with(self.context, request_spec,
filter_properties)
select_dest_mock.assert_called_once_with(self.context, fake_spec)
@@ -2342,6 +2345,52 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
block_device_mapping=mock.ANY,
node='node2', limits=[])
+ @mock.patch('nova.objects.Instance.save')
+ def test_build_instances_max_retries_exceeded(self, mock_save):
+ """Tests that when populate_retry raises MaxRetriesExceeded in
+ build_instances, we don't attempt to cleanup the build request.
+ """
+ instance = fake_instance.fake_instance_obj(self.context)
+ image = {'id': uuids.image_id}
+ filter_props = {
+ 'retry': {
+ 'num_attempts': CONF.scheduler.max_attempts
+ }
+ }
+ requested_networks = objects.NetworkRequestList()
+ with mock.patch.object(self.conductor, '_destroy_build_request',
+ new_callable=mock.NonCallableMock):
+ self.conductor.build_instances(
+ self.context, [instance], image, filter_props,
+ mock.sentinel.admin_pass, mock.sentinel.files,
+ requested_networks, mock.sentinel.secgroups)
+ mock_save.assert_called_once_with()
+
+ @mock.patch('nova.objects.Instance.save')
+ def test_build_instances_reschedule_no_valid_host(self, mock_save):
+ """Tests that when select_destinations raises NoValidHost in
+ build_instances, we don't attempt to cleanup the build request if
+ we're rescheduling (num_attempts>1).
+ """
+ instance = fake_instance.fake_instance_obj(self.context)
+ image = {'id': uuids.image_id}
+ filter_props = {
+ 'retry': {
+ 'num_attempts': 1 # populate_retry will increment this
+ }
+ }
+ requested_networks = objects.NetworkRequestList()
+ with mock.patch.object(self.conductor, '_destroy_build_request',
+ new_callable=mock.NonCallableMock):
+ with mock.patch.object(
+ self.conductor.scheduler_client, 'select_destinations',
+ side_effect=exc.NoValidHost(reason='oops')):
+ self.conductor.build_instances(
+ self.context, [instance], image, filter_props,
+ mock.sentinel.admin_pass, mock.sentinel.files,
+ requested_networks, mock.sentinel.secgroups)
+ mock_save.assert_called_once_with()
+
def test_cleanup_allocated_networks_none_requested(self):
# Tests that we don't deallocate networks if 'none' were specifically
# requested.
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index b243d3bd82..2e7566c8e6 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -1182,6 +1182,28 @@ class SqlAlchemyDbApiTestCase(DbTestCase):
result = test(ctxt)
self.assertEqual(2, len(result))
+ # make sure info_cache and security_groups were auto-joined
+ instance = result[0]
+ self.assertIn('info_cache', instance)
+ self.assertIn('security_groups', instance)
+
+ def test_instance_get_all_by_host_no_joins(self):
+ """Tests that we don't join on the info_cache and security_groups
+ tables if columns_to_join is an empty list.
+ """
+ self.create_instance_with_args()
+
+ @sqlalchemy_api.pick_context_manager_reader
+ def test(ctxt):
+ return sqlalchemy_api.instance_get_all_by_host(
+ ctxt, 'host1', columns_to_join=[])
+
+ result = test(context.get_admin_context())
+ self.assertEqual(1, len(result))
+ # make sure info_cache and security_groups were not auto-joined
+ instance = result[0]
+ self.assertNotIn('info_cache', instance)
+ self.assertNotIn('security_groups', instance)
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
index 100edbd4c8..5e0e20d933 100644
--- a/nova/tests/unit/network/test_network_info.py
+++ b/nova/tests/unit/network/test_network_info.py
@@ -423,6 +423,11 @@ class VIFTests(test.NoDBTestCase):
] * 2
self.assertEqual(fixed_ips, ips)
+ def test_vif_get_fixed_ips_network_is_none(self):
+ vif = model.VIF()
+ fixed_ips = vif.fixed_ips()
+ self.assertEqual([], fixed_ips)
+
def test_vif_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
diff --git a/nova/tests/unit/network/test_neutronv2.py b/nova/tests/unit/network/test_neutronv2.py
index 42428c052f..a8427de481 100644
--- a/nova/tests/unit/network/test_neutronv2.py
+++ b/nova/tests/unit/network/test_neutronv2.py
@@ -181,9 +181,10 @@ class TestNeutronClient(test.NoDBTestCase):
auth_token='token',
is_admin=False)
client = neutronapi.get_client(my_context)
- self.assertRaises(
+ exc = self.assertRaises(
exception.Forbidden,
client.create_port)
+ self.assertIsInstance(exc.format_message(), six.text_type)
def test_withtoken_context_is_admin(self):
self.flags(url='http://anyhost/', group='neutron')
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index 8f05d1725b..6ff9ca8362 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -1439,6 +1439,20 @@ class _TestInstanceObject(object):
inst_value = getattr(inst, attr_name)
self.assertIs(expected_objs[attr_name], inst_value)
+ @mock.patch('nova.objects.Instance.obj_load_attr',
+ new_callable=mock.NonCallableMock) # asserts not called
+ def test_mutated_migration_context_early_exit(self, obj_load_attr):
+ """Tests that we exit early from mutated_migration_context if the
+ migration_context attribute is set to None meaning this instance is
+ not being migrated.
+ """
+ inst = instance.Instance(context=self.context, migration_context=None)
+ for attr in instance._MIGRATION_CONTEXT_ATTRS:
+ self.assertNotIn(attr, inst)
+ with inst.mutated_migration_context():
+ for attr in instance._MIGRATION_CONTEXT_ATTRS:
+ self.assertNotIn(attr, inst)
+
def test_clear_numa_topology(self):
numa_topology = (test_instance_numa_topology.
fake_obj_numa_topology.obj_clone())
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 64b97073f1..54329ea7f3 100755
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -14936,6 +14936,26 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertTrue(instance.cleaned)
save.assert_called_once_with()
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
+ def test_swap_volume_native_luks_blocked(self, mock_get_encryption):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ # dest volume is encrypted
+ mock_get_encryption.side_effect = [{}, {'provider': 'luks'}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
+ # src volume is encrypted
+ mock_get_encryption.side_effect = [{'provider': 'luks'}, {}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
+ # both volumes are encrypted
+ mock_get_encryption.side_effect = [{'provider': 'luks'},
+ {'provider': 'luks'}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete',
return_value=True)
def _test_swap_volume(self, mock_is_job_complete, source_type,
@@ -15112,8 +15132,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
conf = mock.MagicMock(source_path='/fake-new-volume')
get_volume_config.return_value = conf
- conn.swap_volume(old_connection_info, new_connection_info, instance,
- '/dev/vdb', 1)
+ conn.swap_volume(self.context, old_connection_info,
+ new_connection_info, instance, '/dev/vdb', 1)
get_guest.assert_called_once_with(instance)
connect_volume.assert_called_once_with(new_connection_info, disk_info)
@@ -15130,6 +15150,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_swap_volume_driver_source_is_snapshot(self):
self._test_swap_volume_driver(source_type='snapshot')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.rebase')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@@ -15139,7 +15160,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_rebase_error(self,
write_config, get_guest, get_disk, get_volume_config,
- connect_volume, disconnect_volume, rebase):
+ connect_volume, disconnect_volume, rebase,
+ get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while rebasing
"""
@@ -15147,12 +15169,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
+ get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
rebase.side_effect = exc
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
- mock.sentinel.old_connection_info,
+ self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(
@@ -15161,6 +15184,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
disconnect_volume.assert_called_once_with(
mock.sentinel.new_connection_info, 'vdb')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.abort_job')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@@ -15171,7 +15195,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_pivot_error(self,
write_config, get_guest, get_disk, get_volume_config,
- connect_volume, disconnect_volume, abort_job, is_job_complete):
+ connect_volume, disconnect_volume, abort_job, is_job_complete,
+ get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while pivoting to the new volume
"""
@@ -15179,13 +15204,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
+ get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
is_job_complete.return_value = True
abort_job.side_effect = [None, exc]
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
- mock.sentinel.old_connection_info,
+ self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index ddb7d23ca1..83abbd6386 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -243,6 +243,73 @@ class UtilityMigrationTestCase(test.NoDBTestCase):
'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z')
self.assertThat(res, matchers.XMLMatches(new_xml))
+ def test_update_volume_xml_keeps_address(self):
+ # Now test to make sure address isn't altered for virtio-scsi and rbd
+ connection_info = {
+ 'driver_volume_type': 'rbd',
+ 'serial': 'd299a078-f0db-4993-bf03-f10fe44fd192',
+ 'data': {
+ 'access_mode': 'rw',
+ 'secret_type': 'ceph',
+ 'name': 'cinder-volumes/volume-d299a078',
+ 'encrypted': False,
+ 'discard': True,
+ 'cluster_name': 'ceph',
+ 'secret_uuid': '1a790a26-dd49-4825-8d16-3dd627cf05a9',
+ 'qos_specs': None,
+ 'auth_enabled': True,
+ 'volume_id': 'd299a078-f0db-4993-bf03-f10fe44fd192',
+ 'hosts': ['172.16.128.101', '172.16.128.121'],
+ 'auth_username': 'cinder',
+ 'ports': ['6789', '6789', '6789']}}
+ bdm = objects.LibvirtLiveMigrateBDMInfo(
+ serial='d299a078-f0db-4993-bf03-f10fe44fd192',
+ bus='scsi', type='disk', dev='sdc',
+ connection_info=connection_info)
+ data = objects.LibvirtLiveMigrateData(
+ target_connect_addr=None,
+ bdms=[bdm],
+ block_migration=False)
+ xml = """<domain>
+ <devices>
+ <disk type='network' device='disk'>
+ <driver name='qemu' type='raw' cache='writeback' discard='unmap'/>
+ <auth username='cinder'>
+ <secret type='ceph' uuid='1a790a26-dd49-4825-8d16-3dd627cf05a9'/>
+ </auth>
+ <source protocol='rbd' name='cinder-volumes/volume-d299a078'>
+ <host name='172.16.128.101' port='6789'/>
+ <host name='172.16.128.121' port='6789'/>
+ </source>
+ <backingStore/>
+ <target dev='sdb' bus='scsi'/>
+ <serial>d299a078-f0db-4993-bf03-f10fe44fd192</serial>
+ <alias name='scsi0-0-0-1'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='1'/>
+ </disk>
+ </devices>
+</domain>"""
+ conf = vconfig.LibvirtConfigGuestDisk()
+ conf.source_device = bdm.type
+ conf.driver_name = "qemu"
+ conf.driver_format = "raw"
+ conf.driver_cache = "writeback"
+ conf.target_dev = bdm.dev
+ conf.target_bus = bdm.bus
+ conf.serial = bdm.connection_info.get('serial')
+ conf.source_type = "network"
+ conf.driver_discard = 'unmap'
+ conf.device_addr = vconfig.LibvirtConfigGuestDeviceAddressDrive()
+ conf.device_addr.controller = 0
+
+ get_volume_config = mock.MagicMock(return_value=conf)
+ doc = etree.fromstring(xml)
+ res = etree.tostring(migration._update_volume_xml(
+ doc, data, get_volume_config), encoding='unicode')
+ new_xml = xml.replace('sdb',
+ 'sdc')
+ self.assertThat(res, matchers.XMLMatches(new_xml))
+
def test_update_perf_events_xml(self):
data = objects.LibvirtLiveMigrateData(
supported_perf_events=['cmt'])
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index e576fcd79d..7bacb2df68 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -1085,3 +1085,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# can't assert_not_called if the method isn't in the spec.
self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
+
+
+class TestGetVolumeId(test.NoDBTestCase):
+
+ def test_get_volume_id_none_found(self):
+ self.assertIsNone(driver_block_device.get_volume_id(None))
+ self.assertIsNone(driver_block_device.get_volume_id({}))
+ self.assertIsNone(driver_block_device.get_volume_id({'data': {}}))
+
+ def test_get_volume_id_found_volume_id_no_serial(self):
+ self.assertEqual(uuids.volume_id,
+ driver_block_device.get_volume_id(
+ {'data': {'volume_id': uuids.volume_id}}))
+
+ def test_get_volume_id_found_no_volume_id_serial(self):
+ self.assertEqual(uuids.serial,
+ driver_block_device.get_volume_id(
+ {'serial': uuids.serial}))
+
+ def test_get_volume_id_found_both(self):
+ # volume_id is taken over serial
+ self.assertEqual(uuids.volume_id,
+ driver_block_device.get_volume_id(
+ {'serial': uuids.serial,
+ 'data': {'volume_id': uuids.volume_id}}))
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index bc23e19b1a..b2bb38b8a2 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -488,7 +488,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
instance_ref,
'/dev/sda'))
self.assertIsNone(
- self.connection.swap_volume({'driver_volume_type': 'fake',
+ self.connection.swap_volume(None, {'driver_volume_type': 'fake',
'data': {}},
{'driver_volume_type': 'fake',
'data': {}},
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 1da9d09912..edcf5c5496 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -570,3 +570,13 @@ def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
and bdm.destination_type == 'volume'
and is_implemented(bdm))
+
+
+def get_volume_id(connection_info):
+ if connection_info:
+ # Check for volume_id in 'data' and if not there, fallback to
+ # the 'serial' that the DriverVolumeBlockDevice adds during attach.
+ volume_id = connection_info.get('data', {}).get('volume_id')
+ if not volume_id:
+ volume_id = connection_info.get('serial')
+ return volume_id
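A quick usage sketch of the precedence ``get_volume_id()`` implements, mirroring the unit tests added above:

    from nova.virt.block_device import get_volume_id

    get_volume_id(None)                              # None
    get_volume_id({'data': {}})                      # None
    get_volume_id({'serial': 'vol-serial'})          # 'vol-serial'
    get_volume_id({'serial': 'vol-serial',
                   'data': {'volume_id': 'vol-id'}}) # 'vol-id' wins over serial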
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index fc15519c4e..6736296501 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -455,10 +455,11 @@ class ComputeDriver(object):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
- def swap_volume(self, old_connection_info, new_connection_info,
+ def swap_volume(self, context, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the volume attached to the given `instance`.
+ :param context: The request context.
:param dict old_connection_info:
The volume for this connection gets detached from the given
`instance`.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 1a4d5bec68..846c076b16 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -298,7 +298,7 @@ class FakeDriver(driver.ComputeDriver):
except KeyError:
pass
- def swap_volume(self, old_connection_info, new_connection_info,
+ def swap_volume(self, context, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
instance_name = instance.name
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 509900736b..8886f77b24 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1200,6 +1200,16 @@ class LibvirtDriver(driver.ComputeDriver):
**encryption)
return encryptor
+ def _get_volume_encryption(self, context, connection_info):
+ """Get the encryption metadata dict if it is not provided
+ """
+ encryption = {}
+ volume_id = driver_block_device.get_volume_id(connection_info)
+ if volume_id:
+ encryption = encryptors.get_encryption_metadata(context,
+ self._volume_api, volume_id, connection_info)
+ return encryption
+
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
@@ -1348,9 +1358,19 @@ class LibvirtDriver(driver.ComputeDriver):
finally:
self._host.write_instance_config(xml)
- def swap_volume(self, old_connection_info,
+ def swap_volume(self, context, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
+ # NOTE(lyarwood): Bug #1739593 uncovered a nasty data corruption
+ # issue that was fixed in Queens by Ica323b87fa85a454fca9d46ada3677f18.
+ # Given the size of the bugfix it was agreed not to backport the change
+ # to earlier stable branches and to instead block swap volume attempts.
+ if (self._get_volume_encryption(context, old_connection_info) or
+ self._get_volume_encryption(context, new_connection_info)):
+ raise NotImplementedError(_("Swap volume is not supported when "
+ "using encrypted volumes. For more details see "
+ "https://bugs.launchpad.net/nova/+bug/1739593."))
+
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
diff --git a/nova/virt/libvirt/migration.py b/nova/virt/libvirt/migration.py
index 28c49a4258..58bb27588c 100644
--- a/nova/virt/libvirt/migration.py
+++ b/nova/virt/libvirt/migration.py
@@ -162,15 +162,20 @@ def _update_volume_xml(xml_doc, migrate_data, get_volume_config):
# If source and destination have same item, update
# the item using destination value.
for item_dst in xml_doc2.findall(item_src.tag):
- disk_dev.remove(item_src)
- item_dst.tail = None
- disk_dev.insert(cnt, item_dst)
+ if item_dst.tag != 'address':
+ # hw address presented to guest must never change,
+ # especially during live migration as it can be fatal
+ disk_dev.remove(item_src)
+ item_dst.tail = None
+ disk_dev.insert(cnt, item_dst)
# If destination has additional items, these items should be
# added here.
for item_dst in list(xml_doc2):
- item_dst.tail = None
- disk_dev.insert(cnt, item_dst)
+ if item_dst.tag != 'address':
+ # again, hw address presented to guest must never change
+ item_dst.tail = None
+ disk_dev.insert(cnt, item_dst)
return xml_doc
diff --git a/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml b/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml
new file mode 100644
index 0000000000..915b34053e
--- /dev/null
+++ b/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml
@@ -0,0 +1,9 @@
+---
+prelude: >
+ This release includes fixes for security vulnerabilities.
+security:
+ - |
+ [CVE-2017-18191] Swapping encrypted volumes can lead to data loss and a
+ possible compute host DOS attack.
+
+ * `Bug 1739593 <https://bugs.launchpad.net/nova/+bug/1739593>`_