# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils


LOG = logging.getLogger(__name__)


class VolumeOps(object):
"""
Management class for Volume-related tasks
"""

    def __init__(self, session):
        self._session = session

    def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""
Attach volume storage to VM instance.
"""
# NOTE: No Resource Pool concept so far
        LOG.debug(_('Attach_volume: %(connection_info)s, %(instance_name)s,'
                    ' %(mountpoint)s'),
{'connection_info': connection_info,
'instance_name': instance_name,
'mountpoint': mountpoint})
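        # The mountpoint (e.g. '/dev/xvdb') is translated into the integer
        # userdevice number that XenAPI expects for the new VBD.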
dev_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
sr_uuid, vdi_uuid = self._connect_volume(connection_info, dev_number,
instance_name, vm_ref,
hotplug=hotplug)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s'),
{'instance_name': instance_name, 'mountpoint': mountpoint})
return (sr_uuid, vdi_uuid)

    def connect_volume(self, connection_info):
        """
        Attach volume storage to the hypervisor without attaching it to a VM.
        Used to attach just the SR, e.g. during live migration.
        """
# NOTE: No Resource Pool concept so far
LOG.debug(_("Connect_volume: %s"), connection_info)
sr_uuid, vdi_uuid = self._connect_volume(connection_info,
None, None, None, False)
return (sr_uuid, vdi_uuid)

    def _connect_volume(self, connection_info, dev_number=None,
instance_name=None, vm_ref=None, hotplug=True):
driver_type = connection_info['driver_volume_type']
if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
connection_data = connection_info['data']
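        # Derive the SR uuid, a human-readable label and the SR creation
        # parameters from the connection data supplied by Cinder.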
sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
connection_data, 'Disk-for:%s' % instance_name)
# Introduce SR if not already present
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if not sr_ref:
sr_ref = volume_utils.introduce_sr(
self._session, sr_uuid, sr_label, sr_params)
try:
# Introduce VDI
if 'vdi_uuid' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
vdi_uuid=connection_data['vdi_uuid'])
elif 'target_lun' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
target_lun=connection_data['target_lun'])
else:
# NOTE(sirp): This will introduce the first VDI in the SR
vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
# Attach
if vm_ref:
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
dev_number, bootable=False,
osvol=True)
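                # Hot-plug the VBD only when requested and the VM is running;
                # a halted VM picks up the new VBD when it next boots.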
running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
if hotplug and running:
volume_utils.vbd_plug(self._session, vbd_ref, vm_ref)
vdi_uuid = self._session.call_xenapi("VDI.get_uuid", vdi_ref)
return (sr_uuid, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
# NOTE(sirp): Forgetting the SR will have the effect of
# cleaning up the VDI and VBD records, so no need to handle
# that explicitly.
volume_utils.forget_sr(self._session, sr_ref)

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage from VM instance."""
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
{'instance_name': instance_name, 'mountpoint': mountpoint})
device_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
try:
vbd_ref = vm_utils.find_vbd_by_number(
self._session, vm_ref, device_number)
except volume_utils.StorageError:
# NOTE(sirp): If we don't find the VBD then it must have been
# detached previously.
LOG.warn(_('Skipping detach because VBD for %s was'
' not found'), instance_name)
return
        # Unplug the VBD only if the VM is not shut down
unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
self._detach_vbd(vbd_ref, unplug, vm_ref)
LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s'),
{'instance_name': instance_name, 'mountpoint': mountpoint})

    def _get_all_volume_vbd_refs(self, vm_ref):
"""Return VBD refs for all Nova/Cinder volumes."""
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
other_config = self._session.call_xenapi(
"VBD.get_other_config", vbd_ref)
if other_config.get('osvol'):
yield vbd_ref

    def _detach_vbd(self, vbd_ref, unplug, vm_ref):
if unplug:
vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)
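        # Look up the SR via the VBD before the VBD record is destroyed, so
        # it can still be purged afterwards.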
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
vm_utils.destroy_vbd(self._session, vbd_ref)
# Forget SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)

    def detach_all(self, vm_ref):
        """Detach any external nova/cinder volumes and purge the SRs."""
        # Generally speaking, detach_all will be called with the VM already
        # shut down; if it is still running, we can perform the operation by
        # unplugging each VBD first.
unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs:
self._detach_vbd(vbd_ref, unplug, vm_ref)

    def find_bad_volumes(self, vm_ref):
        """Find any volumes with their connection severed.

Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
work when a VBD is present that points to a non-working volume. To work
around this, we scan for non-working volumes and detach them before
retrying a failed operation.
"""
bad_devices = []
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs:
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
try:
# TODO(sirp): bug1152401 This relies on a 120 sec timeout
# within XenServer, update this to fail-fast when this is fixed
# upstream
self._session.call_xenapi("SR.scan", sr_ref)
except self._session.XenAPI.Failure as exc:
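                # SR_BACKEND_FAILURE_40 indicates the scan could not reach
                # the SR's backing storage, so the attached device is
                # treated as bad.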
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
                    vbd_rec = self._session.call_xenapi(
                        "VBD.get_record", vbd_ref)
bad_devices.append('/dev/%s' % vbd_rec['device'])
else:
raise
return bad_devices