summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLee Yarwood <lyarwood@redhat.com>2019-04-15 23:31:02 +0100
committerLee Yarwood <lyarwood@redhat.com>2020-09-22 12:22:41 +0100
commitf63c3f5c4b1aff641697ff02d1c596c2ea6bb172 (patch)
tree20d20d3e13cc5f1472cf2e8501da48df8619e86b
parentb5f324c69057c8eb77abb63d5e83cc1c5e13d256 (diff)
downloadnova-f63c3f5c4b1aff641697ff02d1c596c2ea6bb172.tar.gz
compute: Use source_bdms to reset attachment_ids during LM rollback
As we now provide the original source bdms during rollback, we can also use them to reset any volume attachment ids to the ids used by the source compute host. Change-Id: Ibe9215c07a1ee00e0e121c69bcf7ee1b1b80fae0 (cherry picked from commit 0eea5041c4047bf3ab8bf71973926d80592e5e5c) (cherry picked from commit 8b865378b8d238abaac620d31b98f0ff2c355941) (cherry picked from commit b8a1f08bc828b25871d5db2071e103330e248f6c)
-rw-r--r--nova/compute/manager.py25
-rw-r--r--nova/objects/migrate_data.py2
-rw-r--r--nova/tests/unit/compute/test_compute.py31
3 files changed, 34 insertions, 24 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index af8496239e..4a9c0a5499 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -6282,7 +6282,7 @@ class ComputeManager(manager.Manager):
# save current attachment so we can detach it on success,
# or restore it on a rollback.
# NOTE(mdbooth): This data is no longer used by the source
- # host since change I0390c9ff. We can't remove it until we
+ # host since change Ibe9215c0. We can't remove it until we
# are sure the source host has been upgraded.
migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
bdm.attachment_id
@@ -6921,24 +6921,17 @@ class ComputeManager(manager.Manager):
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
- if bdm.attachment_id:
- # 3.44 cinder api flow. Set the bdm's
- # attachment_id to the old attachment of the source
- # host. If old_attachments is not there, then
- # there was an error before the new attachment was made.
- # TODO(lyarwood): migrate_data.old_vol_attachment_ids can
- # be removed now as we can lookup the original
- # attachment_ids from the source_bdms list here.
- old_attachments = migrate_data.old_vol_attachment_ids \
- if 'old_vol_attachment_ids' in migrate_data else None
- if old_attachments and bdm.volume_id in old_attachments:
- self.volume_api.attachment_delete(context,
- bdm.attachment_id)
- bdm.attachment_id = old_attachments[bdm.volume_id]
+ source_bdm = source_bdms_by_volid[bdm.volume_id]
+ if bdm.attachment_id and source_bdm.attachment_id:
+ # NOTE(lyarwood): 3.44 cinder api flow. Delete the
+ # attachment used by the destination and reset the bdm
+ # attachment_id to the old attachment of the source host.
+ self.volume_api.attachment_delete(context,
+ bdm.attachment_id)
+ bdm.attachment_id = source_bdm.attachment_id
# NOTE(lyarwood): Rollback the connection_info stored within
# the BDM to that used by the source and not the destination.
- source_bdm = source_bdms_by_volid[bdm.volume_id]
bdm.connection_info = source_bdm.connection_info
bdm.save()
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index 4fe4966169..2be3852002 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -32,7 +32,7 @@ class LiveMigrateData(obj_base.NovaObject):
# old_vol_attachment_ids is a dict used to store the old attachment_ids
# for each volume so they can be restored on a migration rollback. The
# key is the volume_id, and the value is the attachment_id.
- # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
+ # TODO(mdbooth): This field was made redundant by change Ibe9215c0. We
# should eventually remove it.
'old_vol_attachment_ids': fields.DictOfStringsField(),
}
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 2bb0fbbc02..f8dab8a86d 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -6218,6 +6218,7 @@ class ComputeTestCase(BaseTestCase,
# cleanup
db.instance_destroy(c, instance['uuid'])
+ @mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(fake.FakeDriver, 'get_instance_disk_info')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(objects.ComputeNode,
@@ -6230,7 +6231,8 @@ class ComputeTestCase(BaseTestCase,
def test_live_migration_exception_rolls_back(self, mock_save,
mock_rollback, mock_remove,
mock_get_bdms,
- mock_get_node, mock_pre, mock_get_disk):
+ mock_get_node, mock_pre, mock_get_disk,
+ mock_attachment_delete):
# Confirm exception when pre_live_migration fails.
c = context.get_admin_context()
@@ -6248,20 +6250,26 @@ class ComputeTestCase(BaseTestCase,
# All the fake BDMs we've generated, in order
fake_bdms = []
+ # A list of the attachment_ids returned by gen_fake_bdms
+ fake_attachment_ids = []
+
def gen_fake_bdms(obj, instance):
- # generate a unique fake connection_info every time we're called,
- # simulating connection_info being mutated elsewhere.
+ # generate a unique fake connection_info and attachment_id every
+ # time we're called, simulating attachment_id and connection_info
+ # being mutated elsewhere.
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
+ **fake_block_device.AnonFakeDbBlockDeviceDict(
{'volume_id': uuids.volume_id_1,
+ 'attachment_id': uuidutils.generate_uuid(),
'source_type': 'volume',
'connection_info':
jsonutils.dumps(uuidutils.generate_uuid()),
'destination_type': 'volume'})),
objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
+ **fake_block_device.AnonFakeDbBlockDeviceDict(
{'volume_id': uuids.volume_id_2,
+ 'attachment_id': uuidutils.generate_uuid(),
'source_type': 'volume',
'connection_info':
jsonutils.dumps(uuidutils.generate_uuid()),
@@ -6269,6 +6277,7 @@ class ComputeTestCase(BaseTestCase,
])
for bdm in bdms:
bdm.save = mock.Mock()
+ fake_attachment_ids.append(bdm.attachment_id)
fake_bdms.append(bdms)
return bdms
@@ -6320,10 +6329,13 @@ class ComputeTestCase(BaseTestCase,
# BDMs with unique connection_info every time it's called. These are
# stored in fake_bdms in the order they were generated. We assert here
# that the last BDMs generated (in _rollback_live_migration) now have
- # the same connection_info as the first BDMs generated (before calling
- # pre_live_migration), and that we saved them.
+ # the same connection_info and attachment_id as the first BDMs
+ # generated (before calling pre_live_migration), and that we saved
+ # them.
self.assertGreater(len(fake_bdms), 1)
for source_bdm, final_bdm in zip(fake_bdms[0], fake_bdms[-1]):
+ self.assertEqual(source_bdm.attachment_id,
+ final_bdm.attachment_id)
self.assertEqual(source_bdm.connection_info,
final_bdm.connection_info)
final_bdm.save.assert_called()
@@ -6336,6 +6348,11 @@ class ComputeTestCase(BaseTestCase,
destroy_disks=True,
migrate_data=test.MatchType(
migrate_data_obj.XenapiLiveMigrateData))
+ # Assert that the final attachment_ids returned by
+ # BlockDeviceMappingList.get_by_instance_uuid are then deleted.
+ mock_attachment_delete.assert_has_calls([
+ mock.call(c, fake_attachment_ids.pop()),
+ mock.call(c, fake_attachment_ids.pop())], any_order=True)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
@mock.patch.object(compute_rpcapi.ComputeAPI,