author    Jenkins <jenkins@review.openstack.org>  2014-12-03 14:02:26 +0000
committer Gerrit Code Review <review@openstack.org>  2014-12-03 14:02:26 +0000
commit    d656d37a28fce51ea1aa7ffd39ce421bda5b4fbe
tree      0633b827ac5f431f567c267da4243922775cdf7a
parent    549405e2801ce89ab5ef3bfe8eabb13dbc06292e
parent    0d60ddc76796e47f2921bdb51fc8174aa3800ab0
download  cinder-d656d37a28fce51ea1aa7ffd39ce421bda5b4fbe.tar.gz
Merge "VMware: Set target ESX host for backing VM clone" into stable/juno
-rw-r--r--  cinder/tests/test_vmware_vmdk.py           | 17
-rw-r--r--  cinder/tests/test_vmware_volumeops.py      | 11
-rw-r--r--  cinder/volume/drivers/vmware/vmdk.py       | 12
-rw-r--r--  cinder/volume/drivers/vmware/volumeops.py  | 16
4 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/cinder/tests/test_vmware_vmdk.py b/cinder/tests/test_vmware_vmdk.py
index 82da1a0c3..aad08dc75 100644
--- a/cinder/tests/test_vmware_vmdk.py
+++ b/cinder/tests/test_vmware_vmdk.py
@@ -1051,7 +1051,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
select_ds_for_volume.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
- summary.datastore, disk_type)
+ summary.datastore, disk_type, mock.sentinel.host)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
@@ -1554,7 +1554,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.clone_backing.assert_called_once_with(
vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
- datastore, vmdk.THIN_VMDK_TYPE)
+ datastore, vmdk.THIN_VMDK_TYPE, host)
delete_temp_backing.assert_called_once_with(backing)
vops.change_backing_profile.assert_called_once_with(clone,
profile_id)
@@ -1766,7 +1766,8 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
summary = mock.Mock()
summary.datastore = mock.sentinel.datastore
- select_ds.return_value = (mock.ANY, mock.ANY, mock.ANY, summary)
+ select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
+ summary)
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
@@ -1783,7 +1784,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
- summary.datastore, disk_type)
+ summary.datastore, disk_type, mock.sentinel.host)
delete_temp_backing.assert_called_once_with(src)
create_backing.reset_mock()
@@ -1805,7 +1806,7 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
- summary.datastore, disk_type)
+ summary.datastore, disk_type, mock.sentinel.host)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
@@ -2132,7 +2133,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
fake_backing,
fake_snapshot,
fake_type,
- None)
+ None,
+ host=None)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
@@ -2177,7 +2179,8 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
fake_backing,
fake_snapshot,
volumeops.FULL_CLONE_TYPE,
- fake_datastore)
+ fake_datastore,
+ host=fake_host)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
diff --git a/cinder/tests/test_vmware_volumeops.py b/cinder/tests/test_vmware_volumeops.py
index 3aef159ac..f6e0e9bef 100644
--- a/cinder/tests/test_vmware_volumeops.py
+++ b/cinder/tests/test_vmware_volumeops.py
@@ -983,7 +983,7 @@ class VolumeOpsTestCase(test.TestCase):
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, None)
+ backing, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
@@ -999,24 +999,25 @@ class VolumeOpsTestCase(test.TestCase):
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'createNewChildDiskBacking'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, None)
+ backing, None, None)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
folder=folder, name=name, spec=clone_spec)]
self.assertEqual(expected, self.session.invoke_api.mock_calls)
- # Test disk type conversion.
+ # Test disk type conversion and target host.
clone_type = None
disk_type = 'thin'
+ host = mock.sentinel.host
self.session.invoke_api.reset_mock()
ret = self.vops.clone_backing(name, backing, snapshot, clone_type,
- datastore, disk_type)
+ datastore, disk_type, host)
self.assertEqual(mock.sentinel.new_backing, ret)
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
get_clone_spec.assert_called_with(datastore, disk_move_type, snapshot,
- backing, disk_type)
+ backing, disk_type, host)
expected = [mock.call(vim_util, 'get_object_property',
self.session.vim, backing, 'parent'),
mock.call(self.session.vim, 'CloneVM_Task', backing,
diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py
index 13d32a1f4..ed1a3a9b9 100644
--- a/cinder/volume/drivers/vmware/vmdk.py
+++ b/cinder/volume/drivers/vmware/vmdk.py
@@ -1136,7 +1136,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
None,
volumeops.FULL_CLONE_TYPE,
datastore,
- disk_type)
+ disk_type,
+ host)
self._delete_temp_backing(backing)
except Exception:
# Delete backing and virtual disk created from image.
@@ -1499,7 +1500,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
new_backing = self.volumeops.clone_backing(
volume['name'], backing, None,
- volumeops.FULL_CLONE_TYPE, datastore, new_disk_type)
+ volumeops.FULL_CLONE_TYPE, datastore, new_disk_type,
+ host)
self._delete_temp_backing(backing)
backing = new_backing
except error_util.VimException:
@@ -1713,7 +1715,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
dest = self.volumeops.clone_backing(dest_name, src, None,
volumeops.FULL_CLONE_TYPE,
- datastore, disk_type)
+ datastore, disk_type, host)
if new_backing:
LOG.debug("Created new backing: %s for restoring backup.",
dest_name)
@@ -1978,12 +1980,14 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
:param src_vsize: the size of the source volume
"""
datastore = None
+ host = None
if not clone_type == volumeops.LINKED_CLONE_TYPE:
# Pick a datastore where to create the full clone under any host
(host, rp, folder, summary) = self._select_ds_for_volume(volume)
datastore = summary.datastore
clone = self.volumeops.clone_backing(volume['name'], backing,
- snapshot, clone_type, datastore)
+ snapshot, clone_type, datastore,
+ host=host)
# If the volume size specified by the user is greater than
# the size of the source volume, the newly created volume will
# allocate the capacity to the size of the source volume in the backend
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index 1eef77070..1548697c6 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -996,7 +996,7 @@ class VMwareVolumeOps(object):
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
- disk_type):
+ disk_type, host=None):
"""Get the clone spec.
:param datastore: Reference to datastore
@@ -1004,6 +1004,7 @@ class VMwareVolumeOps(object):
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
+ :param host: Target host
:return: Clone spec
"""
if disk_type is not None:
@@ -1011,7 +1012,7 @@ class VMwareVolumeOps(object):
else:
disk_device = None
- relocate_spec = self._get_relocate_spec(datastore, None, None,
+ relocate_spec = self._get_relocate_spec(datastore, None, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
@@ -1025,7 +1026,7 @@ class VMwareVolumeOps(object):
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
- disk_type=None):
+ disk_type=None, host=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
@@ -1038,19 +1039,22 @@ class VMwareVolumeOps(object):
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
+ :param host: Target host
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
- "datastore: %(ds)s with disk type: %(disk_type)s.",
+ "host: %(host)s, datastore: %(ds)s with disk type: "
+ "%(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
- 'snap': snapshot, 'ds': datastore, 'disk_type': disk_type})
+ 'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
+ 'host': host})
folder = self._get_folder(backing)
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot,
- backing, disk_type)
+ backing, disk_type, host)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)