path: root/nova/virt/block_device.py
author     Matt Riedemann <mriedem.os@gmail.com>    2017-12-06 14:47:08 -0500
committer  Matt Riedemann <mriedem.os@gmail.com>    2017-12-06 16:40:06 -0500
commit     82de8bc5acf67f083d12015527d8784ac8b2f7c4 (patch)
tree       a1b226e2aa11b56cccbd85e221f4f950b6b534f1 /nova/virt/block_device.py
parent     cbdc893032f10db06a13984df6f8710cb9f9429d (diff)
download   nova-82de8bc5acf67f083d12015527d8784ac8b2f7c4.tar.gz
Avoid stashed connector lookup for new style detach
We can avoid all of the stashed connector hullabaloo when detaching a volume if we're using a new style attachment, because we don't need a host connector in that case, stashed or not; we just delete the attachment and we're done.

This also helps to avoid some confusion about whether or not we need to stash a connector in the connection_info for the new style attachment flow (we don't).

Part of blueprint cinder-new-attach-apis

Change-Id: I1404842f91279dd40ec9e03ccbbfe11cfc48520c
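The change boils down to the decision sketched below as a hypothetical, condensed helper (not the actual nova method); the attachment_delete call is an assumption about the new-style path and does not appear in this patch:

    # Hypothetical, condensed sketch of the detach decision described above.
    def detach_volume(context, bdm, instance, volume_api, connector,
                      attachment_id=None):
        if bdm['attachment_id']:
            # New style attachment: no host connector is needed, stashed or
            # local. Deleting the Cinder attachment is enough.
            # (Assumed call; not shown in this patch.)
            volume_api.attachment_delete(context, bdm['attachment_id'])
        else:
            # Old style attachment: the legacy flow still needs a connector
            # to terminate the connection before detaching.
            volume_api.terminate_connection(context, bdm.volume_id, connector)
            volume_api.detach(context.elevated(), bdm.volume_id,
                              instance.uuid, attachment_id)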
Diffstat (limited to 'nova/virt/block_device.py')
-rw-r--r--   nova/virt/block_device.py   78
1 file changed, 39 insertions, 39 deletions
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 59b35ac19e..12e275406f 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -292,8 +292,6 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
def detach(self, context, instance, volume_api, virt_driver,
attachment_id=None, destroy_bdm=False):
- connector = virt_driver.get_volume_connector(instance)
- connection_info = self['connection_info']
volume_id = self.volume_id
# Only attempt to detach and disconnect from the volume if the instance
@@ -310,48 +308,50 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
{'vol_id': volume_id,
'inst_host': instance.host}, instance=instance)
- if connection_info and not destroy_bdm and (
- connector.get('host') != instance.host):
- # If the volume is attached to another host (evacuate) then
- # this connector is for the wrong host. Use the connector that
- # was stored in connection_info instead (if we have one, and it
- # is for the expected host).
- stashed_connector = connection_info.get('connector')
- if not stashed_connector:
- # Volume was attached before we began stashing connectors
- LOG.warning("Host mismatch detected, but stashed "
- "volume connector not found. Instance host is "
- "%(ihost)s, but volume connector host is "
- "%(chost)s.",
- {'ihost': instance.host,
- 'chost': connector.get('host')})
- elif stashed_connector.get('host') != instance.host:
- # Unexpected error. The stashed connector is also not matching
- # the needed instance host.
- LOG.error("Host mismatch detected in stashed volume "
- "connector. Will use local volume connector. "
- "Instance host is %(ihost)s. Local volume "
- "connector host is %(chost)s. Stashed volume "
- "connector host is %(schost)s.",
- {'ihost': instance.host,
- 'chost': connector.get('host'),
- 'schost': stashed_connector.get('host')})
- else:
- # Fix found. Use stashed connector.
- LOG.debug("Host mismatch detected. Found usable stashed "
- "volume connector. Instance host is %(ihost)s. "
- "Local volume connector host was %(chost)s. "
- "Stashed volume connector host is %(schost)s.",
- {'ihost': instance.host,
- 'chost': connector.get('host'),
- 'schost': stashed_connector.get('host')})
- connector = stashed_connector
-
# NOTE(jdg): For now we need to actually inspect the bdm for an
# attachment_id as opposed to relying on what may have been passed
# in, we want to force usage of the old detach flow for now and only
# use the new flow when we explicitly used it for the attach.
if not self['attachment_id']:
+ connector = virt_driver.get_volume_connector(instance)
+ connection_info = self['connection_info']
+ if connection_info and not destroy_bdm and (
+ connector.get('host') != instance.host):
+ # If the volume is attached to another host (evacuate) then
+ # this connector is for the wrong host. Use the connector that
+ # was stored in connection_info instead (if we have one, and it
+ # is for the expected host).
+ stashed_connector = connection_info.get('connector')
+ if not stashed_connector:
+ # Volume was attached before we began stashing connectors
+ LOG.warning("Host mismatch detected, but stashed "
+ "volume connector not found. Instance host is "
+ "%(ihost)s, but volume connector host is "
+ "%(chost)s.",
+ {'ihost': instance.host,
+ 'chost': connector.get('host')})
+ elif stashed_connector.get('host') != instance.host:
+ # Unexpected error. The stashed connector is also not
+ # matching the needed instance host.
+ LOG.error("Host mismatch detected in stashed volume "
+ "connector. Will use local volume connector. "
+ "Instance host is %(ihost)s. Local volume "
+ "connector host is %(chost)s. Stashed volume "
+ "connector host is %(schost)s.",
+ {'ihost': instance.host,
+ 'chost': connector.get('host'),
+ 'schost': stashed_connector.get('host')})
+ else:
+ # Fix found. Use stashed connector.
+ LOG.debug("Host mismatch detected. Found usable stashed "
+ "volume connector. Instance host is %(ihost)s. "
+ "Local volume connector host was %(chost)s. "
+ "Stashed volume connector host is %(schost)s.",
+ {'ihost': instance.host,
+ 'chost': connector.get('host'),
+ 'schost': stashed_connector.get('host')})
+ connector = stashed_connector
+
volume_api.terminate_connection(context, volume_id, connector)
volume_api.detach(context.elevated(), volume_id, instance.uuid,
attachment_id)
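For completeness, a hedged sketch of the branch this hunk does not show: when self['attachment_id'] is set, the connector handling above is skipped entirely. The attachment_delete call is an assumption about the new style attach flow and is not part of this diff.

        else:
            # New style attachment: the stashed/local connector logic above
            # is unnecessary; removing the Cinder attachment record tears
            # down the connection on the Cinder side.
            # (Assumed call; not shown in this diff.)
            volume_api.attachment_delete(context, self['attachment_id'])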