-rw-r--r--  nova/compute/api.py | 59
-rw-r--r--  nova/compute/manager.py | 30
-rw-r--r--  nova/conductor/manager.py | 28
-rw-r--r--  nova/network/model.py | 7
-rw-r--r--  nova/network/neutronv2/api.py | 2
-rw-r--r--  nova/objects/instance.py | 6
-rw-r--r--  nova/tests/functional/regressions/test_bug_1713783.py | 5
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py | 28
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 241
-rw-r--r--  nova/tests/unit/compute/test_shelve.py | 86
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 16
-rw-r--r--  nova/tests/unit/network/test_network_info.py | 5
-rw-r--r--  nova/tests/unit/network/test_neutronv2.py | 3
-rw-r--r--  nova/tests/unit/objects/test_instance.py | 14
-rwxr-xr-x  nova/tests/unit/virt/libvirt/test_driver.py | 46
-rw-r--r--  nova/tests/unit/virt/test_block_device.py | 25
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py | 2
-rw-r--r--  nova/virt/block_device.py | 10
-rw-r--r--  nova/virt/driver.py | 3
-rw-r--r--  nova/virt/fake.py | 2
-rw-r--r--  nova/virt/images.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 38
-rw-r--r--  releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml | 9
23 files changed, 474 insertions(+), 193 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b3be3669cc..3038948143 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2894,6 +2894,8 @@ class API(base.Base):
quiesced = False
if instance.vm_state == vm_states.ACTIVE:
try:
+ LOG.info(_LI("Attempting to quiesce instance before volume "
+ "snapshot."), instance=instance)
self.compute_rpcapi.quiesce_instance(context, instance)
quiesced = True
except (exception.InstanceQuiesceNotSupported,
@@ -2911,28 +2913,43 @@ class API(base.Base):
context, instance.uuid)
mapping = []
- for bdm in bdms:
- if bdm.no_device:
- continue
-
- if bdm.is_volume:
- # create snapshot based on volume_id
- volume = self.volume_api.get(context, bdm.volume_id)
- # NOTE(yamahata): Should we wait for snapshot creation?
- # Linux LVM snapshot creation completes in
- # short time, it doesn't matter for now.
- name = _('snapshot for %s') % image_meta['name']
- LOG.debug('Creating snapshot from volume %s.', volume['id'],
- instance=instance)
- snapshot = self.volume_api.create_snapshot_force(
- context, volume['id'], name, volume['display_description'])
- mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
- bdm)
- mapping_dict = mapping_dict.get_image_mapping()
- else:
- mapping_dict = bdm.get_image_mapping()
+ try:
+ for bdm in bdms:
+ if bdm.no_device:
+ continue
- mapping.append(mapping_dict)
+ if bdm.is_volume:
+ # create snapshot based on volume_id
+ volume = self.volume_api.get(context, bdm.volume_id)
+ # NOTE(yamahata): Should we wait for snapshot creation?
+ # Linux LVM snapshot creation completes in short time,
+ # it doesn't matter for now.
+ name = _('snapshot for %s') % image_meta['name']
+ LOG.debug('Creating snapshot from volume %s.',
+ volume['id'], instance=instance)
+ snapshot = self.volume_api.create_snapshot_force(
+ context, volume['id'],
+ name, volume['display_description'])
+ mapping_dict = block_device.snapshot_from_bdm(
+ snapshot['id'], bdm)
+ mapping_dict = mapping_dict.get_image_mapping()
+ else:
+ mapping_dict = bdm.get_image_mapping()
+
+ mapping.append(mapping_dict)
+ # NOTE(tasker): No error handling is done in the above for loop.
+ # This means that if the snapshot fails and throws an exception
+ # the traceback will skip right over the unquiesce needed below.
+ # Here, catch any exception, unquiesce the instance, and raise the
+ # error so that the calling function can do what it needs to in
+ # order to properly treat a failed snap.
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if quiesced:
+ LOG.info(_LI("Unquiescing instance after volume snapshot "
+ "failure."), instance=instance)
+ self.compute_rpcapi.unquiesce_instance(
+ context, instance, mapping)
if quiesced:
self.compute_rpcapi.unquiesce_instance(context, instance, mapping)
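The NOTE(tasker) block above leans on oslo.utils' save_and_reraise_exception so the unquiesce RPC runs without swallowing the original snapshot failure. A minimal standalone sketch of that pattern (the helper names here are illustrative, not nova code):

    from oslo_utils import excutils

    def snapshot_all(create_snapshots, unquiesce_instance, quiesced):
        # Illustrative helper: run the snapshot loop and, if anything in it
        # raises, unquiesce the guest before re-raising the original error.
        try:
            create_snapshots()
        except Exception:
            with excutils.save_and_reraise_exception():
                if quiesced:
                    unquiesce_instance()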
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2e83f07ae6..dabab1cdad 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1796,6 +1796,8 @@ class ComputeManager(manager.Manager):
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
+ self._cleanup_volumes(context, instance.uuid,
+ block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info(),
fault_message=e.kwargs['reason'])
@@ -2722,23 +2724,14 @@ class ComputeManager(manager.Manager):
LOG.info(_LI("Rebuilding instance"), instance=instance)
- # NOTE(gyee): there are three possible scenarios.
- #
- # 1. instance is being rebuilt on the same node. In this case,
- # recreate should be False and scheduled_node should be None.
- # 2. instance is being rebuilt on a node chosen by the
- # scheduler (i.e. evacuate). In this case, scheduled_node should
- # be specified and recreate should be True.
- # 3. instance is being rebuilt on a node chosen by the user. (i.e.
- # force evacuate). In this case, scheduled_node is not specified
- # and recreate is set to True.
- #
- # For scenarios #2 and #3, we must do rebuild claim as server is
- # being evacuated to a different node.
- if recreate or scheduled_node is not None:
+ if recreate:
+ # This is an evacuation to a new host, so we need to perform a
+ # resource claim.
rt = self._get_resource_tracker()
rebuild_claim = rt.rebuild_claim
else:
+ # This is a rebuild to the same host, so we don't need to make
+ # a claim since the instance is already on this host.
rebuild_claim = claims.NopClaim
image_meta = {}
@@ -4494,6 +4487,11 @@ class ComputeManager(manager.Manager):
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
+ # FIXME: Umm, shouldn't we be rolling back port bindings too?
+ self._terminate_volume_connections(context, instance, bdms)
+ # The reverts_task_state decorator on unshelve_instance will
+ # eventually save these updates.
+ self._nil_out_instance_obj_host_and_node(instance)
if image:
instance.image_ref = shelved_image_ref
@@ -5015,8 +5013,8 @@ class ComputeManager(manager.Manager):
"old: %(old_cinfo)s",
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
instance=instance)
- self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
- resize_to)
+ self.driver.swap_volume(context, old_cinfo, new_cinfo, instance,
+ mountpoint, resize_to)
LOG.debug("swap_volume: Driver volume swap returned, new "
"connection_info is now : %(new_cinfo)s",
{'new_cinfo': new_cinfo})
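The rebuild-claim hunk above replaces the old three-scenario comment with a single test: claim resources only when recreate is set (an evacuation), otherwise use a no-op claim. A stripped-down sketch of that choice with stand-in names (not the manager's real interface):

    class NopClaim(object):
        """Stand-in claim that reserves nothing and always succeeds."""
        def __init__(self, *args, **kwargs):
            pass

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            return False

    def choose_rebuild_claim(recreate, resource_tracker):
        # Evacuation places the instance on a new host, so resources must be
        # claimed there; an in-place rebuild already holds its allocation.
        if recreate:
            return resource_tracker.rebuild_claim
        return NopClaim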
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 87d98f2cad..6927e7e4a5 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -706,14 +706,26 @@ class ComputeTaskManager(base.Base):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
+
+ try:
+ migration = objects.Migration.get_by_instance_and_status(
+ context, instance.uuid, 'accepted')
+ except exception.MigrationNotFoundByStatus:
+ LOG.debug("No migration record for the rebuild/evacuate "
+ "request.", instance=instance)
+ migration = None
+
if not host:
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is old
# We need to mock that the old way
filter_properties = {'ignore_hosts': [instance.host]}
+ # build_request_spec expects a primitive image dict
+ image_meta = nova_object.obj_to_primitive(
+ instance.image_meta)
request_spec = scheduler_utils.build_request_spec(
- context, image_ref, [instance])
+ context, image_meta, [instance])
elif recreate:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
@@ -741,6 +753,9 @@ class ComputeTaskManager(base.Base):
host_dict['nodename'],
host_dict['limits'])
except exception.NoValidHost as ex:
+ if migration:
+ migration.status = 'error'
+ migration.save()
# Rollback the image_ref if a new one was provided (this
# only happens in the rebuild case, not evacuate).
if orig_image_ref and orig_image_ref != image_ref:
@@ -756,6 +771,9 @@ class ComputeTaskManager(base.Base):
compute_utils.add_instance_fault_from_exc(context,
instance, ex, sys.exc_info())
except exception.UnsupportedPolicyException as ex:
+ if migration:
+ migration.status = 'error'
+ migration.save()
# Rollback the image_ref if a new one was provided (this
# only happens in the rebuild case, not evacuate).
if orig_image_ref and orig_image_ref != image_ref:
@@ -772,14 +790,6 @@ class ComputeTaskManager(base.Base):
compute_utils.add_instance_fault_from_exc(context,
instance, ex, sys.exc_info())
- try:
- migration = objects.Migration.get_by_instance_and_status(
- context, instance.uuid, 'accepted')
- except exception.MigrationNotFoundByStatus:
- LOG.debug("No migration record for the rebuild/evacuate "
- "request.", instance=instance)
- migration = None
-
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
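The conductor now looks the migration record up before scheduling, so that NoValidHost and policy failures can flip an evacuation's migration to 'error'. A reduced sketch of the lookup, reusing the same nova.objects / nova.exception calls the patch uses:

    from nova import exception
    from nova import objects

    def get_evacuation_migration(context, instance_uuid):
        # Evacuations create a Migration record with status 'accepted' in the
        # compute API; a plain rebuild has none, so a miss here is expected.
        try:
            return objects.Migration.get_by_instance_and_status(
                context, instance_uuid, 'accepted')
        except exception.MigrationNotFoundByStatus:
            return None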
diff --git a/nova/network/model.py b/nova/network/model.py
index e591502ebc..f400f2cc15 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -403,8 +403,11 @@ class VIF(Model):
return not self.__eq__(other)
def fixed_ips(self):
- return [fixed_ip for subnet in self['network']['subnets']
- for fixed_ip in subnet['ips']]
+ if self['network']:
+ return [fixed_ip for subnet in self['network']['subnets']
+ for fixed_ip in subnet['ips']]
+ else:
+ return []
def floating_ips(self):
return [floating_ip for fixed_ip in self.fixed_ips()
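The fixed_ips() guard means a VIF with an empty network entry in its info cache now yields no addresses instead of blowing up. A quick usage check mirroring the new unit test (the TypeError note is my reading of the old code, not stated in the patch):

    from nova.network import model

    vif = model.VIF()                 # no 'network' populated in the cache
    assert vif.fixed_ips() == []      # old code hit a TypeError on None here
    assert vif.floating_ips() == []   # floating_ips() builds on fixed_ips()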
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 7b1c019fda..e41415f02b 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -127,7 +127,7 @@ class ClientWrapper(clientv20.Client):
"admin credential located in nova.conf"))
raise exception.NeutronAdminCredentialConfigurationInvalid()
except neutron_client_exc.Forbidden as e:
- raise exception.Forbidden(e)
+ raise exception.Forbidden(six.text_type(e))
return ret
return wrapper
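The one-line neutronv2 change converts the Neutron client's Forbidden error to text before wrapping it in nova's Forbidden exception, so format_message() returns a text string, which the updated unit test now asserts. A rough standalone illustration with a stand-in exception class (not nova's actual exception hierarchy):

    import six

    class Forbidden(Exception):
        """Stand-in for nova.exception.Forbidden."""
        def __init__(self, message):
            super(Forbidden, self).__init__(message)
            self.message = message

        def format_message(self):
            return self.message

    original = RuntimeError('policy does not allow create_port')

    # Before: wrapping the exception object itself gives a non-text message.
    assert not isinstance(Forbidden(original).format_message(),
                          six.string_types)

    # After: wrapping its text representation keeps the message a string.
    assert isinstance(Forbidden(six.text_type(original)).format_message(),
                      six.string_types)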
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index a50f102e15..37b361870f 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -997,6 +997,12 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
context will be saved which can cause incorrect resource tracking, and
should be avoided.
"""
+ # First check to see if we even have a migration context set and if not
+ # we can exit early without lazy-loading other attributes.
+ if 'migration_context' in self and self.migration_context is None:
+ yield
+ return
+
current_values = {}
for attr_name in _MIGRATION_CONTEXT_ATTRS:
current_values[attr_name] = getattr(self, attr_name)
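The early return added to mutated_migration_context works because a generator-based context manager can yield and return before touching any attributes that would otherwise be lazy-loaded. A generic sketch of the pattern (not the actual Instance implementation):

    import contextlib

    @contextlib.contextmanager
    def mutated_context(obj, attr_names):
        # Early exit: no migration context set, so yield control without
        # reading or swapping any of the migration-related attributes.
        if getattr(obj, 'migration_context', None) is None:
            yield
            return
        saved = {name: getattr(obj, name) for name in attr_names}
        try:
            for name in attr_names:
                setattr(obj, name, getattr(obj.migration_context, name))
            yield
        finally:
            for name, value in saved.items():
                setattr(obj, name, value)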
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 356c1cc1c2..fd57421c46 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -124,7 +124,4 @@ class FailedEvacuateStateTests(test.TestCase,
self.assertEqual('evacuation', migrations[0]['migration_type'])
self.assertEqual(server['id'], migrations[0]['instance_uuid'])
self.assertEqual(self.hostname, migrations[0]['source_compute'])
- self.assertEqual('accepted', migrations[0]['status'])
- # NOTE(elod.illes): Migration status should be 'error' and not
- # 'accepted'. Needs to be replaced when bug #1713783 is fixed.
- # self.assertEqual('error', migrations[0]['status'])
+ self.assertEqual('error', migrations[0]['status'])
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index 56c80eed04..10003de6ff 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -2755,7 +2755,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance)
def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
- vm_state=vm_states.ACTIVE):
+ vm_state=vm_states.ACTIVE,
+ snapshot_fails=False):
fake_sys_meta = {'image_min_ram': '11',
'image_min_disk': '22',
'image_container_format': 'ami',
@@ -2801,6 +2802,8 @@ class _ComputeAPIUnitTestMixIn(object):
return {'id': volume_id, 'display_description': ''}
def fake_volume_create_snapshot(context, volume_id, name, description):
+ if snapshot_fails:
+ raise exception.OverQuota(overs="snapshots")
return {'id': '%s-snapshot' % volume_id}
def fake_quiesce_instance(context, instance):
@@ -2850,8 +2853,13 @@ class _ComputeAPIUnitTestMixIn(object):
'tag': None})
# All the db_only fields and the volume ones are removed
- self.compute_api.snapshot_volume_backed(
- self.context, instance, 'test-snapshot')
+ if snapshot_fails:
+ self.assertRaises(exception.OverQuota,
+ self.compute_api.snapshot_volume_backed,
+ self.context, instance, "test-snapshot")
+ else:
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
@@ -2889,8 +2897,13 @@ class _ComputeAPIUnitTestMixIn(object):
quiesced = [False, False]
# Check that the mappings from the image properties are not included
- self.compute_api.snapshot_volume_backed(
- self.context, instance, 'test-snapshot')
+ if snapshot_fails:
+ self.assertRaises(exception.OverQuota,
+ self.compute_api.snapshot_volume_backed,
+ self.context, instance, "test-snapshot")
+ else:
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
@@ -2901,6 +2914,11 @@ class _ComputeAPIUnitTestMixIn(object):
def test_snapshot_volume_backed_with_quiesce(self):
self._test_snapshot_volume_backed(True, False)
+ def test_snapshot_volume_backed_with_quiesce_create_snap_fails(self):
+ self._test_snapshot_volume_backed(quiesce_required=True,
+ quiesce_fails=False,
+ snapshot_fails=True)
+
def test_snapshot_volume_backed_with_quiesce_skipped(self):
self._test_snapshot_volume_backed(False, True)
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 394a1a4620..f37e14925b 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -65,6 +65,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.volume import cinder
CONF = nova.conf.CONF
@@ -1763,162 +1764,159 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
do_test()
+ @mock.patch.object(virt_driver.ComputeDriver, 'get_volume_connector',
+ return_value={})
+ @mock.patch.object(manager.ComputeManager, '_instance_update',
+ return_value={})
+ @mock.patch.object(db, 'instance_fault_create')
+ @mock.patch.object(db, 'block_device_mapping_update')
+ @mock.patch.object(db,
+ 'block_device_mapping_get_by_instance_and_volume_id')
+ @mock.patch.object(cinder.API, 'migrate_volume_completion')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(cinder.API, 'unreserve_volume')
+ @mock.patch.object(cinder.API, 'get')
+ @mock.patch.object(cinder.API, 'roll_detaching')
@mock.patch.object(compute_utils, 'notify_about_volume_swap')
- def test_swap_volume_volume_api_usage(self, mock_notify):
+ def _test_swap_volume(self, mock_notify, mock_roll_detaching,
+ mock_cinder_get, mock_unreserve_volume,
+ mock_terminate_connection,
+ mock_migrate_volume_completion,
+ mock_bdm_get, mock_bdm_update,
+ mock_instance_fault_create,
+ mock_instance_update,
+ mock_get_volume_connector,
+ expected_exception=None):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
volumes = {}
- old_volume_id = uuids.fake
- volumes[old_volume_id] = {'id': old_volume_id,
+ volumes[uuids.old_volume] = {'id': uuids.old_volume,
'display_name': 'old_volume',
'status': 'detaching',
'size': 1}
- new_volume_id = uuids.fake_2
- volumes[new_volume_id] = {'id': new_volume_id,
+ volumes[uuids.new_volume] = {'id': uuids.new_volume,
'display_name': 'new_volume',
'status': 'available',
'size': 2}
- def fake_vol_api_roll_detaching(cls, context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'detaching':
- volumes[volume_id]['status'] = 'in-use'
-
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vdb', 'source_type': 'volume',
'destination_type': 'volume',
'instance_uuid': uuids.instance,
'connection_info': '{"foo": "bar"}'})
- def fake_vol_api_func(cls, context, volume, *args):
+ def fake_vol_api_roll_detaching(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'detaching':
+ volumes[volume_id]['status'] = 'in-use'
+
+ def fake_vol_api_func(context, volume, *args):
self.assertTrue(uuidutils.is_uuid_like(volume))
return {}
- def fake_vol_get(cls, context, volume_id):
+ def fake_vol_get(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
return volumes[volume_id]
- def fake_vol_unreserve(cls, context, volume_id):
+ def fake_vol_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
- def fake_vol_migrate_volume_completion(cls, context, old_volume_id,
+ def fake_vol_migrate_volume_completion(context, old_volume_id,
new_volume_id, error=False):
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
volumes[old_volume_id]['status'] = 'in-use'
return {'save_volume_id': new_volume_id}
- def fake_func_exc(*args, **kwargs):
- raise AttributeError # Random exception
-
- def fake_swap_volume(cls, old_connection_info, new_connection_info,
- instance, mountpoint, resize_to):
- self.assertEqual(resize_to, 2)
-
def fake_block_device_mapping_update(ctxt, id, updates, legacy):
self.assertEqual(2, updates['volume_size'])
return fake_bdm
- self.stub_out('nova.volume.cinder.API.roll_detaching',
- fake_vol_api_roll_detaching)
- self.stub_out('nova.volume.cinder.API.get', fake_vol_get)
- self.stub_out('nova.volume.cinder.API.initialize_connection',
- fake_vol_api_func)
- self.stub_out('nova.volume.cinder.API.unreserve_volume',
- fake_vol_unreserve)
- self.stub_out('nova.volume.cinder.API.terminate_connection',
- fake_vol_api_func)
- self.stub_out('nova.db.'
- 'block_device_mapping_get_by_instance_and_volume_id',
- lambda x, y, z, v: fake_bdm)
- self.stub_out('nova.virt.driver.ComputeDriver.get_volume_connector',
- lambda x: {})
- self.stub_out('nova.virt.driver.ComputeDriver.swap_volume',
- fake_swap_volume)
- self.stub_out('nova.volume.cinder.API.migrate_volume_completion',
- fake_vol_migrate_volume_completion)
- self.stub_out('nova.db.block_device_mapping_update',
- fake_block_device_mapping_update)
- self.stub_out('nova.db.instance_fault_create',
- lambda x, y:
- test_instance_fault.fake_faults['fake-uuid'][0])
- self.stub_out('nova.compute.manager.ComputeManager.'
- '_instance_update', lambda c, u, **k: {})
-
- # Good path
+ mock_roll_detaching.side_effect = fake_vol_api_roll_detaching
+ mock_terminate_connection.side_effect = fake_vol_api_func
+ mock_cinder_get.side_effect = fake_vol_get
+ mock_migrate_volume_completion.side_effect = (
+ fake_vol_migrate_volume_completion)
+ mock_unreserve_volume.side_effect = fake_vol_unreserve
+ mock_bdm_get.return_value = fake_bdm
+ mock_bdm_update.side_effect = fake_block_device_mapping_update
+ mock_instance_fault_create.return_value = (
+ test_instance_fault.fake_faults['fake-uuid'][0])
+
instance1 = fake_instance.fake_instance_obj(
self.context, **{'uuid': uuids.instance})
- self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
- instance1)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(test.MatchType(context.RequestContext),
- instance1, self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(test.MatchType(context.RequestContext),
- instance1, self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.END,
- old_volume_id, new_volume_id)
-
- # Error paths
- mock_notify.reset_mock()
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stub_out('nova.virt.fake.FakeDriver.swap_volume',
- fake_func_exc)
- instance2 = fake_instance.fake_instance_obj(
- self.context, **{'uuid': uuids.instance})
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- instance2)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance2,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance2,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.ERROR,
- old_volume_id, new_volume_id,
- test.MatchType(AttributeError))
-
- mock_notify.reset_mock()
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stub_out('nova.volume.cinder.API.initialize_connection',
- fake_func_exc)
- instance3 = fake_instance.fake_instance_obj(
- self.context, **{'uuid': uuids.instance})
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- instance3)
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- self.assertEqual(2, mock_notify.call_count)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance3,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.START,
- old_volume_id, new_volume_id)
- mock_notify.assert_any_call(
- test.MatchType(context.RequestContext), instance3,
- self.compute.host,
- fields.NotificationAction.VOLUME_SWAP,
- fields.NotificationPhase.ERROR,
- old_volume_id, new_volume_id,
- test.MatchType(AttributeError))
+
+ if expected_exception:
+ volumes[uuids.old_volume]['status'] = 'detaching'
+ volumes[uuids.new_volume]['status'] = 'attaching'
+ self.assertRaises(expected_exception, self.compute.swap_volume,
+ self.context, uuids.old_volume, uuids.new_volume,
+ instance1)
+ self.assertEqual('in-use', volumes[uuids.old_volume]['status'])
+ self.assertEqual('available', volumes[uuids.new_volume]['status'])
+ self.assertEqual(2, mock_notify.call_count)
+ mock_notify.assert_any_call(
+ test.MatchType(context.RequestContext), instance1,
+ self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.START,
+ uuids.old_volume, uuids.new_volume)
+ mock_notify.assert_any_call(
+ test.MatchType(context.RequestContext), instance1,
+ self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.ERROR,
+ uuids.old_volume, uuids.new_volume,
+ test.MatchType(expected_exception))
+ else:
+ self.compute.swap_volume(self.context, uuids.old_volume,
+ uuids.new_volume, instance1)
+ self.assertEqual(volumes[uuids.old_volume]['status'], 'in-use')
+ self.assertEqual(2, mock_notify.call_count)
+ mock_notify.assert_any_call(test.MatchType(context.RequestContext),
+ instance1, self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.START,
+ uuids.old_volume, uuids.new_volume)
+ mock_notify.assert_any_call(test.MatchType(context.RequestContext),
+ instance1, self.compute.host,
+ fields.NotificationAction.VOLUME_SWAP,
+ fields.NotificationPhase.END,
+ uuids.old_volume, uuids.new_volume)
+
+ def _assert_volume_api(self, context, volume, *args):
+ self.assertTrue(uuidutils.is_uuid_like(volume))
+ return {}
+
+ def _assert_swap_volume(self, context, old_connection_info,
+ new_connection_info, instance, mountpoint,
+ resize_to):
+ self.assertEqual(2, resize_to)
+
+ @mock.patch.object(cinder.API, 'initialize_connection')
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume')
+ def test_swap_volume_volume_api_usage(self, mock_swap_volume,
+ mock_initialize_connection):
+ mock_initialize_connection.side_effect = self._assert_volume_api
+ mock_swap_volume.side_effect = self._assert_swap_volume
+ self._test_swap_volume()
+
+ @mock.patch.object(cinder.API, 'initialize_connection')
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume',
+ side_effect=test.TestingException())
+ def test_swap_volume_with_compute_driver_exception(
+ self, mock_swap_volume, mock_initialize_connection):
+ mock_initialize_connection.side_effect = self._assert_volume_api
+ self._test_swap_volume(expected_exception=test.TestingException)
+
+ @mock.patch.object(cinder.API, 'initialize_connection',
+ side_effect=test.TestingException())
+ @mock.patch.object(fake_driver.FakeDriver, 'swap_volume')
+ def test_swap_volume_with_initialize_connection_exception(
+ self, mock_swap_volume, mock_initialize_connection):
+ self._test_swap_volume(expected_exception=test.TestingException)
@mock.patch('nova.compute.utils.notify_about_volume_swap')
@mock.patch('nova.db.block_device_mapping_get_by_instance_and_volume_id')
@@ -3022,7 +3020,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
ex = exception.InstanceNotFound(instance_id=instance.uuid)
self._test_rebuild_ex(instance, ex)
- def test_rebuild_node_not_updated_if_not_recreate(self):
+ # A rebuild to the same host should never attempt a rebuild claim.
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.rebuild_claim',
+ new_callable=mock.NonCallableMock)
+ def test_rebuild_node_not_updated_if_not_recreate(self,
+ mock_rebuild_claim):
node = uuidutils.generate_uuid() # ironic node uuid
instance = fake_instance.fake_instance_obj(self.context, node=node)
instance.migration_context = None
@@ -3776,6 +3778,9 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.block_device_mapping, self.node, self.limits, {})
mock_clean_net.assert_called_once_with(self.context, self.instance,
self.requested_networks)
+ mock_clean_vol.assert_called_once_with(self.context,
+ self.instance.uuid, self.block_device_mapping,
+ raise_exc=False)
mock_add.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY, fault_message=mock.ANY)
mock_nil.assert_called_once_with(self.instance)
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 2d21f092f8..8c74faec2c 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -397,6 +397,92 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.compute.utils.notify_about_instance_action')
+ @mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
+ 'instance_claim')
+ @mock.patch('nova.network.neutronv2.api.API.'
+ 'setup_instance_network_on_host')
+ @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn',
+ side_effect=test.TestingException('oops!'))
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_prep_block_device', return_value='fake_bdm')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_notify_about_instance_usage')
+ @mock.patch('nova.utils.get_image_from_system_metadata')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_terminate_volume_connections')
+ def test_unshelve_spawn_fails_cleanup_volume_connections(
+ self, mock_terminate_volume_connections, mock_image_meta,
+ mock_notify_instance_usage, mock_prep_block_device, mock_spawn,
+ mock_setup_network, mock_instance_claim,
+ mock_notify_instance_action, mock_get_bdms):
+ """Tests error handling when a instance fails to unshelve and makes
+ sure that volume connections are cleaned up from the host
+ and that the host/node values are unset on the instance.
+ """
+ mock_bdms = mock.Mock()
+ mock_get_bdms.return_value = mock_bdms
+ instance = self._create_fake_instance_obj()
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ image_meta = {'properties': {'base_image_ref': uuids.image_id}}
+ mock_image_meta.return_value = image_meta
+
+ tracking = {'last_state': instance.task_state}
+
+ def fake_claim(context, instance, node, limits):
+ instance.host = self.compute.host
+ instance.node = node
+ requests = objects.InstancePCIRequests(requests=[])
+ return claims.Claim(context, instance, node,
+ self.rt, _fake_resources(),
+ requests, limits=limits)
+ mock_instance_claim.side_effect = fake_claim
+
+ def check_save(expected_task_state=None):
+ if tracking['last_state'] == task_states.UNSHELVING:
+ # This is before we've failed.
+ self.assertEqual(task_states.SPAWNING, instance.task_state)
+ tracking['last_state'] = instance.task_state
+ elif tracking['last_state'] == task_states.SPAWNING:
+ # This is after we've failed.
+ self.assertIsNone(instance.host)
+ self.assertIsNone(instance.node)
+ self.assertIsNone(instance.task_state)
+ tracking['last_state'] = instance.task_state
+ else:
+ self.fail('Unexpected save!')
+
+ with mock.patch.object(instance, 'save') as mock_save:
+ mock_save.side_effect = check_save
+ self.assertRaises(test.TestingException,
+ self.compute.unshelve_instance,
+ self.context, instance, image=None,
+ filter_properties=filter_properties, node=node)
+
+ mock_notify_instance_action.assert_called_once_with(
+ self.context, instance, 'fake-mini', action='unshelve',
+ phase='start')
+ mock_notify_instance_usage.assert_called_once_with(
+ self.context, instance, 'unshelve.start')
+ mock_prep_block_device.assert_called_once_with(
+ self.context, instance, mock_bdms, do_check_attach=False)
+ mock_setup_network.assert_called_once_with(self.context, instance,
+ self.compute.host)
+ mock_instance_claim.assert_called_once_with(self.context, instance,
+ test_compute.NODENAME,
+ limits)
+ mock_spawn.assert_called_once_with(
+ self.context, instance, test.MatchType(objects.ImageMeta),
+ injected_files=[], admin_password=None,
+ network_info=[], block_device_info='fake_bdm')
+ mock_terminate_volume_connections.assert_called_once_with(
+ self.context, instance, mock_bdms)
+
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def test_shelved_poll_none_offloaded(self, mock_get_by_filters):
# Test instances are not offloaded when shelved_offload_time is -1
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index a90a1ae750..b88d9f546e 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -1239,6 +1239,9 @@ class _BaseTaskTestCase(object):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
+ bs_mock.assert_called_once_with(
+ self.context, obj_base.obj_to_primitive(inst_obj.image_meta),
+ [inst_obj])
fp_mock.assert_called_once_with(self.context, request_spec,
filter_properties)
select_dest_mock.assert_called_once_with(self.context, fake_spec)
@@ -1310,6 +1313,15 @@ class _BaseTaskTestCase(object):
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
+ # Create the migration record (normally created by the compute API).
+ migration = objects.Migration(self.context,
+ source_compute=inst_obj.host,
+ source_node=inst_obj.node,
+ instance_uuid=inst_obj.uuid,
+ status='accepted',
+ migration_type='evacuation')
+ migration.create()
+
self.assertRaises(exc.UnsupportedPolicyException,
self.conductor.rebuild_instance,
self.context,
@@ -1324,6 +1336,10 @@ class _BaseTaskTestCase(object):
self.assertIn('ServerGroup policy is not supported',
inst_obj.fault.message)
+ # Assert the migration status was updated.
+ migration = objects.Migration.get_by_id(self.context, migration.id)
+ self.assertEqual('error', migration.status)
+
def test_rebuild_instance_evacuate_migration_record(self):
inst_obj = self._create_fake_instance_obj()
migration = objects.Migration(context=self.context,
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
index 100edbd4c8..5e0e20d933 100644
--- a/nova/tests/unit/network/test_network_info.py
+++ b/nova/tests/unit/network/test_network_info.py
@@ -423,6 +423,11 @@ class VIFTests(test.NoDBTestCase):
] * 2
self.assertEqual(fixed_ips, ips)
+ def test_vif_get_fixed_ips_network_is_none(self):
+ vif = model.VIF()
+ fixed_ips = vif.fixed_ips()
+ self.assertEqual([], fixed_ips)
+
def test_vif_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
diff --git a/nova/tests/unit/network/test_neutronv2.py b/nova/tests/unit/network/test_neutronv2.py
index 42428c052f..a8427de481 100644
--- a/nova/tests/unit/network/test_neutronv2.py
+++ b/nova/tests/unit/network/test_neutronv2.py
@@ -181,9 +181,10 @@ class TestNeutronClient(test.NoDBTestCase):
auth_token='token',
is_admin=False)
client = neutronapi.get_client(my_context)
- self.assertRaises(
+ exc = self.assertRaises(
exception.Forbidden,
client.create_port)
+ self.assertIsInstance(exc.format_message(), six.text_type)
def test_withtoken_context_is_admin(self):
self.flags(url='http://anyhost/', group='neutron')
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index 8f05d1725b..6ff9ca8362 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -1439,6 +1439,20 @@ class _TestInstanceObject(object):
inst_value = getattr(inst, attr_name)
self.assertIs(expected_objs[attr_name], inst_value)
+ @mock.patch('nova.objects.Instance.obj_load_attr',
+ new_callable=mock.NonCallableMock) # asserts not called
+ def test_mutated_migration_context_early_exit(self, obj_load_attr):
+ """Tests that we exit early from mutated_migration_context if the
+ migration_context attribute is set to None meaning this instance is
+ not being migrated.
+ """
+ inst = instance.Instance(context=self.context, migration_context=None)
+ for attr in instance._MIGRATION_CONTEXT_ATTRS:
+ self.assertNotIn(attr, inst)
+ with inst.mutated_migration_context():
+ for attr in instance._MIGRATION_CONTEXT_ATTRS:
+ self.assertNotIn(attr, inst)
+
def test_clear_numa_topology(self):
numa_topology = (test_instance_numa_topology.
fake_obj_numa_topology.obj_clone())
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 998fb0cd5d..dfeb72fb27 100755
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -10820,6 +10820,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
]
self.assertEqual(wantFiles, gotFiles)
+ def test_injection_info_is_sanitized(self):
+ info = get_injection_info(
+ network_info=mock.sentinel.network_info,
+ files=mock.sentinel.files,
+ admin_pass='verybadpass')
+ self.assertNotIn('verybadpass', str(info))
+ self.assertNotIn('verybadpass', repr(info))
+
@mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver._build_device_metadata')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@@ -14944,6 +14952,26 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertTrue(instance.cleaned)
save.assert_called_once_with()
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
+ def test_swap_volume_native_luks_blocked(self, mock_get_encryption):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ # dest volume is encrypted
+ mock_get_encryption.side_effect = [{}, {'provider': 'luks'}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
+ # src volume is encrypted
+ mock_get_encryption.side_effect = [{'provider': 'luks'}, {}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
+ # both volumes are encrypted
+ mock_get_encryption.side_effect = [{'provider': 'luks'},
+ {'provider': 'luks'}]
+ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
+ {}, {}, None, None, None)
+
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete',
return_value=True)
def _test_swap_volume(self, mock_is_job_complete, source_type,
@@ -15120,8 +15148,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
conf = mock.MagicMock(source_path='/fake-new-volume')
get_volume_config.return_value = conf
- conn.swap_volume(old_connection_info, new_connection_info, instance,
- '/dev/vdb', 1)
+ conn.swap_volume(self.context, old_connection_info,
+ new_connection_info, instance, '/dev/vdb', 1)
get_guest.assert_called_once_with(instance)
connect_volume.assert_called_once_with(new_connection_info, disk_info)
@@ -15138,6 +15166,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_swap_volume_driver_source_is_snapshot(self):
self._test_swap_volume_driver(source_type='snapshot')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.rebase')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@@ -15147,7 +15176,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_rebase_error(self,
write_config, get_guest, get_disk, get_volume_config,
- connect_volume, disconnect_volume, rebase):
+ connect_volume, disconnect_volume, rebase,
+ get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while rebasing
"""
@@ -15155,12 +15185,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
+ get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
rebase.side_effect = exc
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
- mock.sentinel.old_connection_info,
+ self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(
@@ -15169,6 +15200,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
disconnect_volume.assert_called_once_with(
mock.sentinel.new_connection_info, 'vdb')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
@mock.patch('nova.virt.libvirt.guest.BlockDevice.abort_job')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@@ -15179,7 +15211,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
def test_swap_volume_disconnect_new_volume_on_pivot_error(self,
write_config, get_guest, get_disk, get_volume_config,
- connect_volume, disconnect_volume, abort_job, is_job_complete):
+ connect_volume, disconnect_volume, abort_job, is_job_complete,
+ get_volume_encryption):
"""Assert that disconnect_volume is called for the new volume if an
error is encountered while pivoting to the new volume
"""
@@ -15187,13 +15220,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**self.test_instance)
guest = libvirt_guest.Guest(mock.MagicMock())
get_guest.return_value = guest
+ get_volume_encryption.return_value = {}
exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
is_job_complete.return_value = True
abort_job.side_effect = [None, exc]
self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
- mock.sentinel.old_connection_info,
+ self.context, mock.sentinel.old_connection_info,
mock.sentinel.new_connection_info,
instance, '/dev/vdb', 0)
connect_volume.assert_called_once_with(
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index e576fcd79d..7bacb2df68 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -1085,3 +1085,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# can't assert_not_called if the method isn't in the spec.
self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
+
+
+class TestGetVolumeId(test.NoDBTestCase):
+
+ def test_get_volume_id_none_found(self):
+ self.assertIsNone(driver_block_device.get_volume_id(None))
+ self.assertIsNone(driver_block_device.get_volume_id({}))
+ self.assertIsNone(driver_block_device.get_volume_id({'data': {}}))
+
+ def test_get_volume_id_found_volume_id_no_serial(self):
+ self.assertEqual(uuids.volume_id,
+ driver_block_device.get_volume_id(
+ {'data': {'volume_id': uuids.volume_id}}))
+
+ def test_get_volume_id_found_no_volume_id_serial(self):
+ self.assertEqual(uuids.serial,
+ driver_block_device.get_volume_id(
+ {'serial': uuids.serial}))
+
+ def test_get_volume_id_found_both(self):
+ # volume_id is taken over serial
+ self.assertEqual(uuids.volume_id,
+ driver_block_device.get_volume_id(
+ {'serial': uuids.serial,
+ 'data': {'volume_id': uuids.volume_id}}))
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index bc23e19b1a..b2bb38b8a2 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -488,7 +488,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
instance_ref,
'/dev/sda'))
self.assertIsNone(
- self.connection.swap_volume({'driver_volume_type': 'fake',
+ self.connection.swap_volume(None, {'driver_volume_type': 'fake',
'data': {}},
{'driver_volume_type': 'fake',
'data': {}},
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 1da9d09912..edcf5c5496 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -570,3 +570,13 @@ def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
and bdm.destination_type == 'volume'
and is_implemented(bdm))
+
+
+def get_volume_id(connection_info):
+ if connection_info:
+ # Check for volume_id in 'data' and if not there, fallback to
+ # the 'serial' that the DriverVolumeBlockDevice adds during attach.
+ volume_id = connection_info.get('data', {}).get('volume_id')
+ if not volume_id:
+ volume_id = connection_info.get('serial')
+ return volume_id
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index fc15519c4e..6736296501 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -455,10 +455,11 @@ class ComputeDriver(object):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
- def swap_volume(self, old_connection_info, new_connection_info,
+ def swap_volume(self, context, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the volume attached to the given `instance`.
+ :param context: The request context.
:param dict old_connection_info:
The volume for this connection gets detached from the given
`instance`.
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 1a4d5bec68..846c076b16 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -298,7 +298,7 @@ class FakeDriver(driver.ComputeDriver):
except KeyError:
pass
- def swap_volume(self, old_connection_info, new_connection_info,
+ def swap_volume(self, context, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
instance_name = instance.name
diff --git a/nova/virt/images.py b/nova/virt/images.py
index fc4db6275e..59e1ddf24d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -39,7 +39,7 @@ CONF = nova.conf.CONF
IMAGE_API = image.API()
QEMU_IMG_LIMITS = processutils.ProcessLimits(
- cpu_time=8,
+ cpu_time=30,
address_space=1 * units.Gi)
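This hunk raises the CPU-time cap on qemu-img probing from 8 to 30 seconds while keeping the 1 GiB address-space cap. The limit is enforced by handing the ProcessLimits object to oslo.concurrency's execute via prlimit; a hedged sketch of that usage (the exact qemu-img invocation in nova may differ slightly):

    from oslo_concurrency import processutils
    from oslo_utils import units

    QEMU_IMG_LIMITS = processutils.ProcessLimits(
        cpu_time=30,                 # seconds of CPU before the child is killed
        address_space=1 * units.Gi)  # cap address space at 1 GiB

    def qemu_img_info(path):
        # Probe an image header under resource limits so a crafted image
        # cannot pin the compute host's CPU or memory.
        out, _err = processutils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=QEMU_IMG_LIMITS)
        return out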
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index ec1d1917ea..6311d9c28c 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -141,8 +141,14 @@ CONSOLE = "console=tty0 console=ttyS0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
-InjectionInfo = collections.namedtuple(
- 'InjectionInfo', ['network_info', 'files', 'admin_pass'])
+
+class InjectionInfo(collections.namedtuple(
+ 'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
+ __slots__ = ()
+
+ def __repr__(self):
+ return ('InjectionInfo(network_info=%r, files=%r, '
+ 'admin_pass=<SANITIZED>)') % (self.network_info, self.files)
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
@@ -1194,6 +1200,16 @@ class LibvirtDriver(driver.ComputeDriver):
**encryption)
return encryptor
+ def _get_volume_encryption(self, context, connection_info):
+ """Get the encryption metadata dict if it is not provided
+ """
+ encryption = {}
+ volume_id = driver_block_device.get_volume_id(connection_info)
+ if volume_id:
+ encryption = encryptors.get_encryption_metadata(context,
+ self._volume_api, volume_id, connection_info)
+ return encryption
+
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
@@ -1342,9 +1358,19 @@ class LibvirtDriver(driver.ComputeDriver):
finally:
self._host.write_instance_config(xml)
- def swap_volume(self, old_connection_info,
+ def swap_volume(self, context, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
+ # NOTE(lyarwood): Bug #1739593 uncovered a nasty data corruption
+ # issue that was fixed in Queens by Ica323b87fa85a454fca9d46ada3677f18.
+ # Given the size of the bugfix it was agreed not to backport the change
+ # to earlier stable branches and to instead block swap volume attempts.
+ if (self._get_volume_encryption(context, old_connection_info) or
+ self._get_volume_encryption(context, new_connection_info)):
+ raise NotImplementedError(_("Swap volume is not supported when "
+ "using encrypted volumes. For more details see "
+ "https://bugs.launchpad.net/nova/+bug/1739593."))
+
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
@@ -3076,8 +3102,8 @@ class LibvirtDriver(driver.ComputeDriver):
:param injection_info: Injection info
"""
# Handles the partition need to be used.
- LOG.debug('Checking root disk injection %(info)s',
- info=str(injection_info), instance=instance)
+ LOG.debug('Checking root disk injection %s',
+ str(injection_info), instance=instance)
target_partition = None
if not instance.kernel_id:
target_partition = CONF.libvirt.inject_partition
@@ -3107,7 +3133,7 @@ class LibvirtDriver(driver.ComputeDriver):
metadata = instance.get('metadata')
if any((key, net, metadata, admin_pass, injection_info.files)):
- LOG.debug('Injecting %(info)s', info=str(injection_info),
+ LOG.debug('Injecting %s', str(injection_info),
instance=instance)
img_id = instance.image_ref
try:
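Subclassing the InjectionInfo namedtuple only to override __repr__ keeps the tuple behaviour while keeping the admin password out of log output. A self-contained check of that behaviour, mirroring the new test_injection_info_is_sanitized test:

    import collections

    class InjectionInfo(collections.namedtuple(
            'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
        __slots__ = ()

        def __repr__(self):
            # Never let the admin password reach log output.
            return ('InjectionInfo(network_info=%r, files=%r, '
                    'admin_pass=<SANITIZED>)') % (self.network_info, self.files)

    info = InjectionInfo(network_info=None, files=[], admin_pass='verybadpass')
    assert 'verybadpass' not in repr(info)    # sanitized in logs
    assert info.admin_pass == 'verybadpass'   # still readable by callers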
diff --git a/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml b/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml
new file mode 100644
index 0000000000..915b34053e
--- /dev/null
+++ b/releasenotes/notes/bug-1739593-cve-2017-18191-25fe48d336d8cf13.yaml
@@ -0,0 +1,9 @@
+---
+prelude: >
+ This release includes fixes for security vulnerabilities.
+security:
+ - |
+ [CVE-2017-18191] Swapping encrypted volumes can lead to data loss and a
+ possible compute host DOS attack.
+
+ * `Bug 1739593 <https://bugs.launchpad.net/nova/+bug/1739593>`_