Diffstat (limited to 'nova')
-rw-r--r--  nova/api/openstack/compute/contrib/flavor_access.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/flavormanage.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/security_groups.py | 2
-rw-r--r--  nova/api/openstack/compute/flavors.py | 4
-rw-r--r--  nova/compute/api.py | 9
-rw-r--r--  nova/compute/instance_types.py | 2
-rwxr-xr-x  nova/compute/manager.py | 22
-rw-r--r--  nova/console/websocketproxy.py | 5
-rw-r--r--  nova/db/api.py | 4
-rw-r--r--  nova/db/sqlalchemy/api.py | 57
-rw-r--r--  nova/network/manager.py | 19
-rw-r--r--  nova/network/quantumv2/api.py | 9
-rw-r--r--  nova/scheduler/driver.py | 12
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_access.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_swap.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavorextradata.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_security_groups.py | 14
-rw-r--r--  nova/tests/api/openstack/compute/test_flavors.py | 4
-rw-r--r--  nova/tests/compute/test_compute.py | 161
-rw-r--r--  nova/tests/fakelibvirt.py | 2
-rw-r--r--  nova/tests/network/test_quantumv2.py | 32
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 4
-rw-r--r--  nova/tests/test_db_api.py | 67
-rw-r--r--  nova/tests/test_hypervapi.py | 71
-rw-r--r--  nova/tests/test_libvirt.py | 155
-rw-r--r--  nova/tests/test_powervm.py | 31
-rw-r--r--  nova/tests/test_virt_drivers.py | 3
-rw-r--r--  nova/tests/test_vmwareapi.py | 65
-rw-r--r--  nova/tests/test_vmwareapi_vm_util.py | 103
-rw-r--r--  nova/tests/test_xenapi.py | 5
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 3
-rwxr-xr-x  nova/virt/driver.py | 2
-rw-r--r--  nova/virt/firewall.py | 16
-rw-r--r--  nova/virt/hyperv/hostutils.py | 4
-rw-r--r--  nova/virt/hyperv/livemigrationops.py | 27
-rw-r--r--  nova/virt/hyperv/volumeops.py | 2
-rwxr-xr-x  nova/virt/libvirt/driver.py | 77
-rwxr-xr-x  nova/virt/libvirt/utils.py | 6
-rw-r--r--  nova/virt/powervm/blockdev.py | 11
-rwxr-xr-x  nova/virt/vmwareapi/driver.py | 22
-rw-r--r--  nova/virt/vmwareapi/fake.py | 84
-rw-r--r--  nova/virt/vmwareapi/host.py | 8
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 127
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 118
47 files changed, 1139 insertions(+), 248 deletions(-)
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index 39220ee4af..c7adb238e2 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -95,7 +95,8 @@ class FlavorAccessController(object):
authorize(context)
try:
- flavor = instance_types.get_instance_type_by_flavor_id(flavor_id)
+ flavor = instance_types.get_instance_type_by_flavor_id(flavor_id,
+ ctxt=context)
except exception.FlavorNotFound:
explanation = _("Flavor not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py
index 7cff7f4b89..de4e0c555c 100644
--- a/nova/api/openstack/compute/contrib/flavormanage.py
+++ b/nova/api/openstack/compute/contrib/flavormanage.py
@@ -43,7 +43,7 @@ class FlavorManageController(wsgi.Controller):
try:
flavor = instance_types.get_instance_type_by_flavor_id(
- id, read_deleted="no")
+ id, ctxt=context, read_deleted="no")
except exception.NotFound, e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index 700d9b71e3..f3d047c8c0 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -330,7 +330,7 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
- if net != '0.0.0.0' and prefixlen == '0':
+ if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
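The widened check above treats '::' like '0.0.0.0', so a zero-length prefix is accepted only for the two catch-all networks. The rule in isolation, as a standalone sketch (Nova's netutils.get_net_and_prefixlen returns strings, hence the comparison against the string '0'):

    def bad_prefix(cidr):
        # A /0 prefix is only acceptable for the catch-all networks
        # 0.0.0.0/0 and ::/0; any other network with prefix length 0
        # is rejected, mirroring the hunk above.
        net, _, prefixlen = cidr.partition('/')
        return prefixlen == '0' and net not in ('0.0.0.0', '::')

    assert not bad_prefix('0.0.0.0/0')
    assert not bad_prefix('::/0')
    assert bad_prefix('10.0.0.0/0')
    assert not bad_prefix('15.0.0.0/8')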
diff --git a/nova/api/openstack/compute/flavors.py b/nova/api/openstack/compute/flavors.py
index a0d33abd46..93280fd90d 100644
--- a/nova/api/openstack/compute/flavors.py
+++ b/nova/api/openstack/compute/flavors.py
@@ -84,7 +84,9 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given flavor id."""
try:
- flavor = instance_types.get_instance_type_by_flavor_id(id)
+ context = req.environ['nova.context']
+ flavor = instance_types.get_instance_type_by_flavor_id(id,
+ ctxt=context)
req.cache_db_flavor(flavor)
except exception.NotFound:
raise webob.exc.HTTPNotFound()
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 78328c4540..be88be2ea4 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -775,11 +775,16 @@ class API(base.Base):
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']):
# NOTE(vish): For now, just make sure the volumes are accessible.
+ # Additionally, check that the volume can be attached to this
+ # instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
if volume_id is not None:
try:
- self.volume_api.get(context, volume_id)
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_attach(context,
+ volume,
+ instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
@@ -1318,7 +1323,7 @@ class API(base.Base):
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
- return instance_types.get_instance_type(instance_type_id)
+ return instance_types.get_instance_type(instance_type_id, ctxt=context)
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 0734ce7fb5..f9758ec3aa 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -209,7 +209,7 @@ def get_instance_type_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
if ctxt is None:
ctxt = context.get_admin_context(read_deleted=read_deleted)
- return db.instance_type_get_by_flavor_id(ctxt, flavorid)
+ return db.instance_type_get_by_flavor_id(ctxt, flavorid, read_deleted)
def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
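What the one-line change buys: previously read_deleted was consumed only when building the fallback admin context, so callers that passed their own context silently got the DB layer's default. A reduced, runnable illustration of the before/after behaviour (all names hypothetical):

    def db_get(ctxt, flavorid, read_deleted=None):
        # stand-in for db.instance_type_get_by_flavor_id
        return read_deleted or ctxt['read_deleted']

    def get_old(flavorid, ctxt=None, read_deleted='yes'):
        ctxt = ctxt or {'read_deleted': read_deleted}
        return db_get(ctxt, flavorid)            # read_deleted dropped

    def get_new(flavorid, ctxt=None, read_deleted='yes'):
        ctxt = ctxt or {'read_deleted': read_deleted}
        return db_get(ctxt, flavorid, read_deleted)

    ctxt = {'read_deleted': 'no'}
    assert get_old('42', ctxt, read_deleted='yes') == 'no'   # ignored
    assert get_new('42', ctxt, read_deleted='yes') == 'yes'  # honoured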
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cae121747e..63eb20a21a 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1595,8 +1595,20 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.info(_("disk not on shared storage, rebuilding from:"
" '%s'") % str(image_ref))
- instance = self._instance_update(
- context, instance['uuid'], host=self.host)
+ # NOTE(mriedem): On a recreate (evacuate), we need to update
+ # the instance's host and node properties to reflect its
+ # destination node for the recreate.
+ node_name = None
+ try:
+ compute_node = self._get_compute_info(context, self.host)
+ node_name = compute_node['hypervisor_hostname']
+ except exception.NotFound:
+ LOG.exception(_('Failed to get compute_info for %s') %
+ self.host)
+ finally:
+ instance = self._instance_update(
+ context, instance['uuid'], host=self.host,
+ node=node_name)
if image_ref:
image_meta = _get_image_meta(context, image_ref)
@@ -2112,7 +2124,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None,
- expected_task_state=None)
+ expected_task_state=[None,
+ task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
@@ -3907,7 +3920,8 @@ class ComputeManager(manager.SchedulerDependentManager):
old_enough = (not instance['deleted_at'] or
timeutils.is_older_than(instance['deleted_at'],
interval))
- soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
+ soft_deleted = (instance['vm_state'] == vm_states.SOFT_DELETED and
+ instance['task_state'] == None)
if soft_deleted and old_enough:
capi = self.conductor_api
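The try/except/finally shape in the evacuate hunk is the point: the compute-node lookup is best-effort, but the host update must happen regardless, so _instance_update sits in the finally block. The same pattern in isolation (stand-ins for the compute-manager pieces):

    def relocate(instance, host, get_compute_info, update):
        node_name = None
        try:
            node_name = get_compute_info(host)['hypervisor_hostname']
        except LookupError:
            pass  # logged in nova; the instance still moves to 'host'
        finally:
            update(instance, host=host, node=node_name)

    moved = {}
    relocate('i-1', 'dest', lambda h: {'hypervisor_hostname': 'n1'},
             lambda i, **kw: moved.update(kw))
    assert moved == {'host': 'dest', 'node': 'n1'}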
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index f7fdea1735..ce3fff8d53 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -42,6 +42,11 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
"""
Called after a new WebSocket connection has been established.
"""
+ # Reopen the eventlet hub to make sure we don't share an epoll
+ # fd with parent and/or siblings, which would be bad
+ from eventlet import hubs
+ hubs.use_hub()
+
cookie = Cookie.SimpleCookie()
cookie.load(self.headers.getheader('cookie'))
token = cookie['token'].value
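Why the hub is reopened here: websockify forks a handler per connection, and after fork() the parent and children would otherwise poll the same inherited epoll descriptor, so events can be consumed by the wrong process. Re-selecting the hub gives the new process its own descriptor. A minimal sketch of the same pattern in a forked child (assuming eventlet is installed):

    import os

    from eventlet import hubs

    pid = os.fork()
    if pid == 0:
        # child: drop the inherited hub so a fresh epoll fd is created
        hubs.use_hub()
        # ... the child's event loop now runs on its own descriptor ...
        os._exit(0)
    os.waitpid(pid, 0)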
diff --git a/nova/db/api.py b/nova/db/api.py
index e8d70fa937..b6cedc7738 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1283,9 +1283,9 @@ def instance_type_get_by_name(context, name):
return IMPL.instance_type_get_by_name(context, name)
-def instance_type_get_by_flavor_id(context, id):
+def instance_type_get_by_flavor_id(context, id, read_deleted=None):
"""Get instance type by flavor id."""
- return IMPL.instance_type_get_by_flavor_id(context, id)
+ return IMPL.instance_type_get_by_flavor_id(context, id, read_deleted)
def instance_type_destroy(context, name):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 15c583f458..278b309dfe 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -36,6 +36,7 @@ from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
+from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.schema import Table
@@ -1690,7 +1691,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
- 'metadata']
+ 'metadata', 'task_state']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
@@ -3231,6 +3232,8 @@ def security_group_rule_get_by_security_group(context, security_group_id,
filter_by(parent_group_id=security_group_id).\
options(joinedload_all('grantee_group.instances.'
'system_metadata')).\
+ options(joinedload('grantee_group.instances.'
+ 'info_cache')).\
all()
@@ -3556,7 +3559,7 @@ def instance_type_create(context, values):
pass
try:
instance_type_get_by_flavor_id(context, values['flavorid'],
- session)
+ read_deleted='no', session=session)
raise exception.InstanceTypeIdExists(flavor_id=values['flavorid'])
except exception.FlavorNotFound:
pass
@@ -3598,9 +3601,16 @@ def _dict_with_extra_specs(inst_type_query):
def _instance_type_get_query(context, session=None, read_deleted=None):
- return model_query(context, models.InstanceTypes, session=session,
+ query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
- options(joinedload('extra_specs'))
+ options(joinedload('extra_specs'))
+ if not context.is_admin:
+ the_filter = [models.InstanceTypes.is_public == True]
+ the_filter.extend([
+ models.InstanceTypes.projects.any(project_id=context.project_id)
+ ])
+ query = query.filter(or_(*the_filter))
+ return query
@require_context
@@ -3675,9 +3685,11 @@ def instance_type_get_by_name(context, name, session=None):
@require_context
-def instance_type_get_by_flavor_id(context, flavor_id, session=None):
+def instance_type_get_by_flavor_id(context, flavor_id, read_deleted,
+ session=None):
"""Returns a dict describing specific flavor_id."""
- result = _instance_type_get_query(context, session=session).\
+ result = _instance_type_get_query(context, read_deleted=read_deleted,
+ session=session).\
filter_by(flavorid=flavor_id).\
first()
@@ -3727,7 +3739,7 @@ def instance_type_access_add(context, flavor_id, project_id):
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
- session=session)
+ read_deleted='no', session=session)
instance_type_id = instance_type_ref['id']
access_ref = _instance_type_access_query(context, session=session).\
filter_by(instance_type_id=instance_type_id).\
@@ -3750,7 +3762,7 @@ def instance_type_access_remove(context, flavor_id, project_id):
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
- session=session)
+ read_deleted='no', session=session)
instance_type_id = instance_type_ref['id']
count = _instance_type_access_query(context, session=session).\
filter_by(instance_type_id=instance_type_id).\
@@ -4346,8 +4358,16 @@ def aggregate_get(context, aggregate_id):
@require_admin_context
def aggregate_get_by_host(context, host, key=None):
- query = _aggregate_get_query(context, models.Aggregate,
- models.AggregateHost.host, host)
+ """Return rows that match host (mandatory) and metadata key (optional).
+
+ :param host: matches host, and is required.
+ :param key: matches metadata key, if not None.
+ """
+ query = model_query(context, models.Aggregate)
+ query = query.options(joinedload('_hosts'))
+ query = query.options(joinedload('_metadata'))
+ query = query.join('_hosts')
+ query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
@@ -4357,13 +4377,16 @@ def aggregate_get_by_host(context, host, key=None):
@require_admin_context
def aggregate_metadata_get_by_host(context, host, key=None):
- query = model_query(context, models.Aggregate).join(
- "_hosts").filter(models.AggregateHost.host == host).join(
- "_metadata")
+ query = model_query(context, models.Aggregate)
+ query = query.join("_hosts")
+ query = query.join("_metadata")
+ query = query.filter(models.AggregateHost.host == host)
+ query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
+
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
@@ -4373,9 +4396,13 @@ def aggregate_metadata_get_by_host(context, host, key=None):
@require_admin_context
def aggregate_host_get_by_metadata_key(context, key):
- query = model_query(context, models.Aggregate).join(
- "_metadata").filter(models.AggregateMetadata.key == key)
+ query = model_query(context, models.Aggregate)
+ query = query.join("_metadata")
+ query = query.filter(models.AggregateMetadata.key == key)
+ query = query.options(contains_eager("_metadata"))
+ query = query.options(joinedload("_hosts"))
rows = query.all()
+
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
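The switch to contains_eager in the two aggregate queries is the interesting part: joinedload emits its own extra JOIN purely for eager loading, while contains_eager populates the relationship from the JOIN the query already made, so the loaded collection holds only rows that survived the filter. A self-contained sketch of the difference (SQLAlchemy 1.4+ syntax, simplified models):

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import (Session, contains_eager, declarative_base,
                                relationship)

    Base = declarative_base()

    class Aggregate(Base):
        __tablename__ = 'aggregates'
        id = Column(Integer, primary_key=True)
        _metadata = relationship('AggregateMetadata')

    class AggregateMetadata(Base):
        __tablename__ = 'aggregate_metadata'
        id = Column(Integer, primary_key=True)
        aggregate_id = Column(Integer, ForeignKey('aggregates.id'))
        key = Column(String(50))
        value = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(Aggregate(id=1, _metadata=[
            AggregateMetadata(key='good', value='v'),
            AggregateMetadata(key='bad', value='w')]))
        session.commit()
        agg = (session.query(Aggregate)
               .join(Aggregate._metadata)
               .filter(AggregateMetadata.key == 'good')
               .options(contains_eager(Aggregate._metadata))
               .one())
        # only the filtered row lands in the eager-loaded collection
        assert [m.key for m in agg._metadata] == ['good']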
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 022f0e67d0..71e5442de4 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -571,7 +571,6 @@ class NetworkManager(manager.Manager):
instance_id = instance_uuid
instance_uuid = instance_id
- host = kwargs.get('host')
vifs = self.db.virtual_interface_get_by_instance(context,
instance_uuid)
networks = {}
@@ -931,11 +930,27 @@ class NetworkManager(manager.Manager):
LOG.error(msg % address)
return
+ # NOTE(cfb): Call teardown before release_dhcp to ensure
+ # that the IP can't be re-leased after a release
+ # packet is sent.
+ self._teardown_network_on_host(context, network)
# NOTE(vish): This forces a packet so that the release_fixed_ip
# callback will get called by nova-dhcpbridge.
self.driver.release_dhcp(dev, address, vif['address'])
- self._teardown_network_on_host(context, network)
+ # NOTE(yufang521247): This is probably a failed dhcp fixed ip.
+ # DHCPRELEASE packet sent to dnsmasq would not trigger
+ # dhcp-bridge to run. Thus it is better to disassociate such
+ # fixed ip here.
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context,
+ address)
+ if (instance_uuid == fixed_ip_ref['instance_uuid'] and
+ not fixed_ip_ref.get('leased')):
+ self.db.fixed_ip_disassociate(context, address)
+
+ else:
+ # We can't try to free the IP address so just call teardown
+ self._teardown_network_on_host(context, network)
# Commit the reservations
if reservations:
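The reordering above is subtle enough to restate: tearing down the host's dnsmasq entry before sending the forced DHCPRELEASE closes the window in which the client could immediately re-lease the address, and a fixed IP that was never leased will not trigger nova-dhcpbridge's callback, so it has to be disassociated inline. The decision logic for the allocated path, reduced (a sketch, not the manager's real signatures):

    def release(fixed_ip, teardown, release_dhcp, disassociate):
        teardown()                          # no re-lease window after this
        release_dhcp(fixed_ip['address'])   # forces the dhcpbridge callback
        if not fixed_ip.get('leased'):
            # never leased: dnsmasq stays silent, so clean up ourselves
            disassociate(fixed_ip['address'])

    calls = []
    release({'address': '10.0.0.2', 'leased': False},
            lambda: calls.append('teardown'),
            lambda a: calls.append('release'),
            lambda a: calls.append('disassociate'))
    assert calls == ['teardown', 'release', 'disassociate']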
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index cdc892eba6..506c3b32bb 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -31,6 +31,7 @@ from nova.network import model as network_model
from nova.network import quantumv2
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
@@ -792,12 +793,18 @@ class API(base.Base):
data = client.list_ports(**search_opts)
ports = data.get('ports', [])
if networks is None:
+ # retrieve networks from info_cache to get correct nic order
+ network_cache = self.conductor_api.instance_get_by_uuid(
+ context, instance['uuid'])['info_cache']['network_info']
+ network_cache = jsonutils.loads(network_cache)
+ net_ids = [iface['network']['id'] for iface in network_cache]
networks = self._get_available_networks(context,
instance['project_id'])
# ensure ports are in preferred network order, and filter out
# those not attached to one of the provided list of networks
- net_ids = [n['id'] for n in networks]
+ else:
+ net_ids = [n['id'] for n in networks]
ports = [port for port in ports if port['network_id'] in net_ids]
_ensure_requested_network_ordering(lambda x: x['network_id'],
ports, net_ids)
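The cache-driven ordering relies on the _ensure_requested_network_ordering helper that is only referenced here; its job, sketched (Python's sort is stable, so ports on the same network keep their relative order):

    def ensure_requested_network_ordering(accessor, unordered, preferred):
        # reorder 'unordered' to follow the network order in 'preferred'
        unordered.sort(key=lambda obj: preferred.index(accessor(obj)))

    ports = [{'network_id': 'b'}, {'network_id': 'a'}]
    ensure_requested_network_ordering(lambda p: p['network_id'],
                                      ports, ['a', 'b'])
    assert [p['network_id'] for p in ports] == ['a', 'b']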
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index b6e7444105..7add6e8c7d 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -258,15 +258,13 @@ class Scheduler(object):
# If dest is not specified, have scheduler pick one.
if dest is None:
instance_type = instance_types.extract_instance_type(instance_ref)
- if not instance_ref['image_ref']:
- image = None
- else:
- image = self.image_service.show(context,
- instance_ref['image_ref'])
request_spec = {'instance_properties': instance_ref,
'instance_type': instance_type,
- 'instance_uuids': [instance_ref['uuid']],
- 'image': image}
+ 'instance_uuids': [instance_ref['uuid']]}
+ if instance_ref['image_ref']:
+ image = self.image_service.show(context,
+ instance_ref['image_ref'])
+ request_spec['image'] = image
filter_properties = {'ignore_hosts': ignore_hosts}
return self.select_hosts(context, request_spec,
filter_properties)[0]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
index fc5543409e..97b67a390f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
@@ -68,7 +68,7 @@ def fake_get_instance_type_access_by_flavor_id(flavorid):
return res
-def fake_get_instance_type_by_flavor_id(flavorid):
+def fake_get_instance_type_by_flavor_id(flavorid, ctxt=None):
return INSTANCE_TYPES[flavorid]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
index 5d15264abd..b60d761cec 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
@@ -39,7 +39,7 @@ FAKE_FLAVORS = {
}
-def fake_instance_type_get_by_flavor_id(flavorid):
+def fake_instance_type_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
index 9b58e7b747..1a4390e07a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
@@ -25,7 +25,8 @@ from nova import test
from nova.tests.api.openstack import fakes
-def fake_get_instance_type_by_flavor_id(flavorid, read_deleted='yes'):
+def fake_get_instance_type_by_flavor_id(flavorid, ctxt=None,
+ read_deleted='yes'):
if flavorid == 'failtest':
raise exception.NotFound("Not found sucka!")
elif not str(flavorid) == '1234':
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
index d86f750cf9..cbc94c3d51 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
@@ -38,7 +38,7 @@ FAKE_FLAVORS = {
}
-def fake_instance_type_get_by_flavor_id(flavorid):
+def fake_instance_type_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
index eeb0fe6322..a5fcb02346 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
@@ -38,7 +38,7 @@ FAKE_FLAVORS = {
}
-def fake_instance_type_get_by_flavor_id(flavorid):
+def fake_instance_type_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py b/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
index 1c5703381a..5239b96952 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
@@ -23,7 +23,7 @@ from nova import test
from nova.tests.api.openstack import fakes
-def fake_get_instance_type_by_flavor_id(flavorid):
+def fake_get_instance_type_by_flavor_id(flavorid, ctxt=None):
return {
'id': flavorid,
'flavorid': str(flavorid),
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index fdd73e3f7b..9b0358b050 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -1110,6 +1110,20 @@ class TestSecurityGroupRules(test.TestCase):
self.assertEquals(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
+ def test_create_rule_cidr_ipv6_allow_all(self):
+ rule = security_group_rule_template(cidr='::/0',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEquals(security_group_rule['id'], 0)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEquals(security_group_rule['ip_range']['cidr'],
+ "::/0")
+
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index 3a11c92fe3..952654152c 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -50,7 +50,7 @@ FAKE_FLAVORS = {
}
-def fake_instance_type_get_by_flavor_id(flavorid):
+def fake_instance_type_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
@@ -76,7 +76,7 @@ def empty_instance_type_get_all(inactive=False, filters=None):
return {}
-def return_instance_type_not_found(flavor_id):
+def return_instance_type_not_found(flavor_id, ctxt=None):
raise exception.InstanceTypeNotFound(instance_type_id=flavor_id)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e220996f31..d3ef9bf746 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -331,6 +331,59 @@ class ComputeVolumeTestCase(BaseTestCase):
block_device_mapping)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
+ def test_validate_bdm(self):
+ # Test if volume is checked for availability before being attached
+ # at boot time
+
+ def fake_bdms(context, instance_uuid):
+ block_device_mapping = [{
+ 'id': 1,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ }]
+ return block_device_mapping
+ self.stubs.Set(self.compute.db,
+ 'block_device_mapping_get_all_by_instance',
+ fake_bdms)
+
+ # Check that the volume status is 'available' and reject if not
+ def fake_volume_get_1(self, context, volume_id):
+ return {'id': volume_id,
+ 'status': 'creating',
+ 'attach_status': 'detached'}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_1)
+
+ self.assertRaises(exception.InvalidBDMVolume,
+ self.compute_api._validate_bdm,
+ self.context,
+ instance=self.instance)
+
+ # Check that the volume attach_status is 'detached' and reject if not
+ def fake_volume_get_2(self, context, volume_id):
+ return {'id': volume_id,
+ 'status': 'available',
+ 'attach_status': 'attached'}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_2)
+
+ self.assertRaises(exception.InvalidBDMVolume,
+ self.compute_api._validate_bdm,
+ self.context,
+ instance=self.instance)
+
+ # Check that the volume status is 'available' and attach_status is
+ # 'detached' and accept the request if so
+ def fake_volume_get_3(self, context, volume_id):
+ return {'id': volume_id,
+ 'status': 'available',
+ 'attach_status': 'detached'}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_3)
+
+ self.compute_api._validate_bdm(self.context, instance=self.instance)
+
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
@@ -4251,6 +4304,45 @@ class ComputeTestCase(BaseTestCase):
updated_ats = (updated_at_1, updated_at_2, updated_at_3)
self.assertEqual(len(updated_ats), len(set(updated_ats)))
+ def test_reclaim_queued_deletes(self):
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ # Active
+ self._create_fake_instance(params={'host': CONF.host})
+
+ # Deleted not old enough
+ self._create_fake_instance(params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ # Deleted old enough (only this one should be reclaimed)
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance = self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+
+ # Restoring
+ # NOTE(hanlind): This specifically tests for a race condition
+ # where restoring a previously soft deleted instance sets
+ # deleted_at back to None, causing reclaim to think it can be
+ # deleted, see LP #1186243.
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': task_states.RESTORING})
+
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ instance_ref = jsonutils.to_primitive(db.instance_get_by_uuid(
+ ctxt, instance['uuid']))
+ self.compute._delete_instance(ctxt, instance_ref, [])
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
class ComputeAPITestCase(BaseTestCase):
@@ -5503,6 +5595,43 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
+ def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
+ instance = self._create_fake_instance()
+ old_type = instance_types.extract_instance_type(instance)
+ new_type = instance_types.get_instance_type_by_flavor_id('4')
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = instance_types.save_instance_type_info(sys_meta,
+ old_type, 'old_')
+ sys_meta = instance_types.save_instance_type_info(sys_meta,
+ new_type, 'new_')
+ sys_meta = instance_types.save_instance_type_info(sys_meta,
+ new_type)
+
+ fake_rt = self.mox.CreateMockAnything()
+
+ def fake_confirm_resize(*args, **kwargs):
+ pass
+
+ def fake_get_resource_tracker(self):
+ return fake_rt
+
+ self.stubs.Set(fake_rt, 'confirm_resize', fake_confirm_resize)
+ self.stubs.Set(self.compute, '_get_resource_tracker',
+ fake_get_resource_tracker)
+
+ migration = db.migration_create(self.context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'status': 'finished'})
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': task_states.DELETING,
+ 'vm_state': vm_states.RESIZED,
+ 'system_metadata': sys_meta})
+
+ self.compute.confirm_resize(self.context, instance,
+ migration=migration)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
+
def test_resize_revert_through_api(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
@@ -8017,7 +8146,8 @@ class EvacuateHostTestCase(BaseTestCase):
def setUp(self):
super(EvacuateHostTestCase, self).setUp()
self.inst_ref = jsonutils.to_primitive(self._create_fake_instance
- ({'host': 'fake_host_2'}))
+ ({'host': 'fake_host_2',
+ 'node': 'fakenode2'}))
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.REBUILDING})
@@ -8035,8 +8165,34 @@ class EvacuateHostTestCase(BaseTestCase):
on_shared_storage=on_shared_storage)
def test_rebuild_on_host_updated_target(self):
- """Confirm evacuate scenario updates host."""
+ """Confirm evacuate scenario updates host and node."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ self.assertTrue(context.is_admin)
+ self.assertEquals('fake-mini', host)
+ return {'hypervisor_hostname': self.rt.nodename}
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst_ref['id'])
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_rebuild_on_host_updated_target_node_not_found(self):
+ """Confirm evacuate scenario where compute_node isn't found."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ raise exception.NotFound(_("Host %s not found") % host)
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
self.mox.ReplayAll()
self._rebuild()
@@ -8044,6 +8200,7 @@ class EvacuateHostTestCase(BaseTestCase):
# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)
+ self.assertIsNone(instance['node'])
def test_rebuild_with_instance_in_stopped_state(self):
"""Confirm evacuate scenario updates vm_state to stopped
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 05dce0c760..927c04ae09 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -117,6 +117,8 @@ VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
+VIR_ERR_OPERATION_INVALID = 55
+VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index c0773e8ae6..57ccfd8094 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -23,11 +23,13 @@ from quantumclient.common import exceptions as qexceptions
from quantumclient.v2_0 import client
from nova.compute import instance_types
+from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.network import model
from nova.network import quantumv2
from nova.network.quantumv2 import api as quantumapi
+from nova.openstack.common import jsonutils
from nova import test
from nova import utils
@@ -269,6 +271,15 @@ class TestQuantumv2(test.TestCase):
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
+ self.mox.StubOutWithMock(conductor_api.API,
+ 'instance_get_by_uuid')
+ net_info_cache = []
+ for port in port_data:
+ net_info_cache.append({"network": {"id": port['network_id']}})
+ info_cache = {'info_cache': {'network_info':
+ jsonutils.dumps(net_info_cache)}}
+ api.conductor_api.instance_get_by_uuid(
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
@@ -368,6 +379,16 @@ class TestQuantumv2(test.TestCase):
quantumv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
+ self.mox.StubOutWithMock(conductor_api.API,
+ 'instance_get_by_uuid')
+ net_info_cache = []
+ for port in self.port_data3:
+ net_info_cache.append({"network": {"id": port['network_id']}})
+ info_cache = {'info_cache': {'network_info':
+ jsonutils.dumps(net_info_cache)}}
+
+ api.conductor_api.instance_get_by_uuid(
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
@@ -730,7 +751,17 @@ class TestQuantumv2(test.TestCase):
def _test_deallocate_port_for_instance(self, number):
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.delete_port(port_data[0]['id'])
+ self.mox.StubOutWithMock(conductor_api.API,
+ 'instance_get_by_uuid')
+ net_info_cache = []
+ for port in port_data:
+ net_info_cache.append({"network": {"id": port['network_id']}})
+ info_cache = {'info_cache': {'network_info':
+ jsonutils.dumps(net_info_cache)}}
+ api = quantumapi.API()
+ api.conductor_api.instance_get_by_uuid(
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
nets = [port_data[0]['network_id']]
quantumv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
@@ -758,7 +789,6 @@ class TestQuantumv2(test.TestCase):
self.mox.ReplayAll()
- api = quantumapi.API()
nwinfo = api.deallocate_port_for_instance(self.context, self.instance,
port_data[0]['id'])
self.assertEqual(len(nwinfo), len(port_data[1:]))
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 392ea81cfe..e4baab5510 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -808,9 +808,7 @@ class SchedulerTestCase(test.TestCase):
request_spec = {'instance_properties': instance,
'instance_type': {},
- 'instance_uuids': [instance['uuid']],
- 'image': None
- }
+ 'instance_uuids': [instance['uuid']]}
ignore_hosts = [instance['host']]
filter_properties = {'ignore_hosts': ignore_hosts}
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index f9a5e3c9d6..6026f34cd5 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -1306,18 +1306,36 @@ class AggregateDBApiTestCase(test.TestCase):
def test_aggregate_get_by_host(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate2'}
+ values3 = {'name': 'fake_aggregate3'}
+ values4 = {'name': 'fake_aggregate4'}
+ values5 = {'name': 'fake_aggregate5'}
a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
+ # a3 has no hosts and should not be in the results.
+ a3 = _create_aggregate(context=ctxt, values=values3)
+ # a4 has no matching hosts.
+ a4 = _create_aggregate_with_hosts(context=ctxt, values=values4,
+ hosts=['foo4.openstack.org'])
+ # a5 has no matching hosts after deleting the only matching host.
+ a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
+ hosts=['foo5.openstack.org', 'foo.openstack.org'])
+ db.aggregate_host_delete(ctxt, a5['id'],
+ 'foo.openstack.org')
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate2'}
+ values3 = {'name': 'fake_aggregate3'}
+ values4 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate_with_hosts(context=ctxt, values=values2)
+ _create_aggregate(context=ctxt, values=values3)
+ _create_aggregate_with_hosts(context=ctxt, values=values4,
+ hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
@@ -1336,16 +1354,22 @@ class AggregateDBApiTestCase(test.TestCase):
def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2'}
- values2 = {'name': 'fake_aggregate3'}
+ values2 = {'name': 'fake_aggregate12'}
+ values3 = {'name': 'fake_aggregate23'}
+ a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
+ a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
+ a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
+ a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
- a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
- hosts=['foo.openstack.org'], metadata={'good': 'value'})
- r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=a2_hosts, metadata=a2_metadata)
+ a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
+ hosts=a3_hosts, metadata=a3_metadata)
+ r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
key='good')
- self.assertEqual(r1['good'], set(['value']))
+ self.assertEqual(r1['good'], set(['value12', 'value23']))
self.assertFalse('fake_key1' in r1)
+ self.assertFalse('bad' in r1)
# Delete metadata
db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
@@ -1354,14 +1378,23 @@ class AggregateDBApiTestCase(test.TestCase):
def test_aggregate_host_get_by_metadata_key(self):
ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2'}
- values2 = {'name': 'fake_aggregate3'}
+ values2 = {'name': 'fake_aggregate12'}
+ values3 = {'name': 'fake_aggregate23'}
+ a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
+ a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
+ a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
+ a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values)
- a3 = _create_aggregate_with_hosts(context=ctxt, values=values2,
- hosts=['foo.openstack.org'], metadata={'good': 'value'})
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=a2_hosts, metadata=a2_metadata)
+ a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
+ hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
- self.assertEqual(r1, {'foo.openstack.org': set(['value'])})
+ self.assertEqual({
+ 'foo1.openstack.org': set(['value12']),
+ 'foo2.openstack.org': set(['value12', 'value23']),
+ 'foo3.openstack.org': set(['value23']),
+ }, r1)
self.assertFalse('fake_key1' in r1)
def test_aggregate_get_by_host_not_found(self):
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 6e25a827ca..6eecd844ef 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -52,7 +52,6 @@ from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
-from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
@@ -84,11 +83,13 @@ class HyperVAPITestCase(test.TestCase):
self._instance_ide_dvds = []
self._instance_volume_disks = []
self._test_vm_name = None
+ self._check_min_windows_version_satisfied = True
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.quantumv2.api.API')
+ self.flags(force_volumeutils_v1=True, group='hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
@@ -112,6 +113,11 @@ class HyperVAPITestCase(test.TestCase):
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
+ def fake_check_min_windows_version(fake_self, major, minor):
+ return self._check_min_windows_version_satisfied
+ self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
+ fake_check_min_windows_version)
+
def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
@@ -120,10 +126,6 @@ class HyperVAPITestCase(test.TestCase):
pass
vmutils.VMUtils.__init__ = fake_vmutils__init__
- def fake_get_volume_utils(self):
- return volumeutils.VolumeUtils()
- volumeops.VolumeOps._get_volume_utils = fake_get_volume_utils
-
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
@@ -544,6 +546,11 @@ class HyperVAPITestCase(test.TestCase):
self._conn.destroy(self._instance_data, None)
self._mox.VerifyAll()
+ def test_live_migration_unsupported_os(self):
+ self._check_min_windows_version_satisfied = False
+ self._conn = driver_hyperv.HyperVDriver(None)
+ self._test_live_migration(unsupported_os=True)
+
def test_live_migration_without_volumes(self):
self._test_live_migration()
@@ -554,14 +561,15 @@ class HyperVAPITestCase(test.TestCase):
self._test_live_migration(test_failure=True)
def _test_live_migration(self, test_failure=False,
- with_volumes=False):
+ with_volumes=False,
+ unsupported_os=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
instance_name = instance_data['name']
fake_post_method = self._mox.CreateMockAnything()
- if not test_failure:
+ if not test_failure and not unsupported_os:
fake_post_method(self._context, instance_data, dest_server,
False)
@@ -581,27 +589,32 @@ class HyperVAPITestCase(test.TestCase):
else:
fake_scsi_paths = {}
- m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
- instance_data['name'], dest_server)
- if test_failure:
- m.AndRaise(vmutils.HyperVException('Simulated failure'))
+ if not unsupported_os:
+ m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
+ instance_data['name'], dest_server)
+ if test_failure:
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
- if with_volumes:
- m.AndReturn([(fake_target_iqn, fake_target_lun)])
- volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
- else:
- m.AndReturn([])
+ if with_volumes:
+ m.AndReturn([(fake_target_iqn, fake_target_lun)])
+ volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
+ else:
+ m.AndReturn([])
self._mox.ReplayAll()
try:
+ hyperv_exception_raised = False
+ unsupported_os_exception_raised = False
self._conn.live_migration(self._context, instance_data,
dest_server, fake_post_method,
fake_recover_method)
- exception_raised = False
except vmutils.HyperVException:
- exception_raised = True
+ hyperv_exception_raised = True
+ except NotImplementedError:
+ unsupported_os_exception_raised = True
- self.assertTrue(not test_failure ^ exception_raised)
+ self.assertTrue(not test_failure ^ hyperv_exception_raised)
+ self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
@@ -954,6 +967,26 @@ class HyperVAPITestCase(test.TestCase):
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
+ def _test_volumeutils_version(self, is_hyperv_2012=True,
+ force_volumeutils_v1=False):
+ self._check_min_windows_version_satisfied = is_hyperv_2012
+ self.flags(force_volumeutils_v1=force_volumeutils_v1, group='hyperv')
+ self._conn = driver_hyperv.HyperVDriver(None)
+ is_volutils_v2 = isinstance(self._conn._volumeops._volutils,
+ volumeutilsv2.VolumeUtilsV2)
+
+ self.assertTrue((is_hyperv_2012 and not force_volumeutils_v1) ^
+ (not is_volutils_v2))
+
+ def test_volumeutils_version_hyperv_2012(self):
+ self._test_volumeutils_version(True, False)
+
+ def test_volumeutils_version_hyperv_2012_force_v1(self):
+ self._test_volumeutils_version(True, True)
+
+ def test_volumeutils_version_hyperv_2008R2(self):
+ self._test_volumeutils_version(False, False)
+
def test_attach_volume(self):
instance_data = self._get_instance_data()
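The assertTrue(not a ^ b) idiom used in _test_live_migration is worth unpacking: ^ binds tighter than not, so the assertion reads not (a ^ b), i.e. the exception must be raised exactly when the scenario expects it. A truth-table check:

    for expected, raised in [(True, True), (False, False)]:
        assert not (expected ^ raised)   # agree: the assertion passes
    for expected, raised in [(True, False), (False, True)]:
        assert (expected ^ raised)       # disagree: the assertion would fail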
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 873e82f7a0..db3b907a11 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -2503,6 +2503,80 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(self.context, instance_ref['uuid'])
+ def test_get_instance_disk_info_excludes_volumes(self):
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume1'/>"
+ "<target dev='vdc' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume2'/>"
+ "<target dev='vdd' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ GB = 1024 * 1024 * 1024
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': [
+ {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
+ {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'],
+ block_device_info=info)
+ info = jsonutils.loads(info)
+ self.assertEquals(info[0]['type'], 'raw')
+ self.assertEquals(info[0]['path'], '/test/disk')
+ self.assertEquals(info[0]['disk_size'], 10737418240)
+ self.assertEquals(info[0]['backing_file'], "")
+ self.assertEquals(info[0]['over_committed_disk_size'], 0)
+ self.assertEquals(info[1]['type'], 'qcow2')
+ self.assertEquals(info[1]['path'], '/test/disk.local')
+ self.assertEquals(info[1]['virt_disk_size'], 21474836480)
+ self.assertEquals(info[1]['backing_file'], "file")
+ self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)
+
+ db.instance_destroy(self.context, instance_ref['uuid'])
+
def test_spawn_with_network_info(self):
# Preparing mocks
def fake_none(*args, **kwargs):
@@ -3072,6 +3146,27 @@ class LibvirtConnTestCase(test.TestCase):
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
+ def test_destroy_timed_out(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_error_code(self):
+ return libvirt.VIR_ERR_OPERATION_TIMEOUT
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(libvirt.libvirtError, 'get_error_code',
+ fake_get_error_code)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.assertRaises(exception.InstancePowerOffFailure,
+ conn.destroy, instance, [])
+
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
@@ -3716,6 +3811,38 @@ class LibvirtConnTestCase(test.TestCase):
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
+ def _test_shared_storage_detection(self, is_same):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('bar')
+ utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
+ os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
+ if is_same:
+ os.unlink(mox.IgnoreArg())
+ else:
+ utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
+ self.mox.ReplayAll()
+ return conn._is_storage_shared_with('foo', '/path')
+
+ def test_shared_storage_detection_same_host(self):
+ self.assertTrue(self._test_shared_storage_detection(True))
+
+ def test_shared_storage_detection_different_host(self):
+ self.assertFalse(self._test_shared_storage_detection(False))
+
+ def test_shared_storage_detection_easy(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('foo')
+ self.mox.ReplayAll()
+ self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
+
class HostStateTestCase(test.TestCase):
@@ -3964,7 +4091,9 @@ class IptablesFirewallTestCase(test.TestCase):
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
- _fake_stub_out_get_nw_info(self.stubs, lambda *a, **kw: network_model)
+ from nova.compute import utils as compute_utils
+ self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+ lambda instance: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
@@ -4409,6 +4538,17 @@ class LibvirtUtilsTestCase(test.TestCase):
result = libvirt_utils.get_iscsi_initiator()
self.assertEqual(initiator, result)
+ def test_get_missing_iscsi_initiator(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ file_path = '/etc/iscsi/initiatorname.iscsi'
+ utils.execute('cat', file_path, run_as_root=True).AndRaise(
+ exception.FileNotFound(file_path=file_path)
+ )
+ # Start test
+ self.mox.ReplayAll()
+ result = libvirt_utils.get_iscsi_initiator()
+ self.assertIsNone(result)
+
def test_create_image(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'create', '-f', 'raw',
@@ -4735,8 +4875,10 @@ class LibvirtDriverTestCase(test.TestCase):
.migrate_disk_and_power_off. """
self.counter = 0
+ self.checked_shared_storage = False
- def fake_get_instance_disk_info(instance, xml=None):
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
return '[]'
def fake_destroy(instance):
@@ -4753,11 +4895,17 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_os_path_exists(path):
return True
+ def fake_is_storage_shared(dest, inst_base):
+ self.checked_shared_storage = True
+ return False
+
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
+ self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
+ fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
@@ -4781,7 +4929,8 @@ class LibvirtDriverTestCase(test.TestCase):
'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
- def fake_get_instance_disk_info(instance, xml=None):
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
return disk_info_text
def fake_destroy(instance):
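The _is_storage_shared_with check that these stubs replace works by a marker-file trick, which the three detection tests above pin down: if the destination resolves to this host, storage is trivially shared; otherwise touch a file on the destination over ssh and see whether it appears locally. A standalone sketch of that trick (assumes passwordless ssh; names are illustrative):

    import os
    import subprocess
    import uuid

    def is_storage_shared_with(dest_host, local_ip, inst_base):
        if dest_host == local_ip:           # same host: trivially shared
            return True
        marker = os.path.join(inst_base, 'marker-%s' % uuid.uuid4())
        subprocess.check_call(['ssh', dest_host, 'touch', marker])
        if os.path.exists(marker):          # remote write visible locally
            os.unlink(marker)
            return True
        subprocess.check_call(['ssh', dest_host, 'rm', marker])
        return False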
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 78ca3d9e33..0610843a03 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -572,6 +572,37 @@ class PowerVMLocalVolumeAdapterTestCase(test.TestCase):
self.powervm_adapter = powervm_blockdev.PowerVMLocalVolumeAdapter(
self.connection)
+ def test_copy_image_file_ftp_failed(self):
+ file_path = os.tempnam('/tmp', 'image')
+ remote_path = '/mnt/openstack/images'
+ exp_remote_path = os.path.join(remote_path,
+ os.path.basename(file_path))
+ exp_cmd = ' '.join(['/usr/bin/rm -f', exp_remote_path])
+
+ fake_noop = lambda *args, **kwargs: None
+ fake_op = self.powervm_adapter
+ self.stubs.Set(fake_op, 'run_vios_command', fake_noop)
+ self.stubs.Set(fake_op, '_checksum_local_file', fake_noop)
+
+ self.mox.StubOutWithMock(common, 'ftp_put_command')
+ self.mox.StubOutWithMock(self.powervm_adapter,
+ 'run_vios_command_as_root')
+ msg_args = {'ftp_cmd': 'PUT',
+ 'source_path': file_path,
+ 'dest_path': remote_path}
+ exp_exception = exception.PowerVMFTPTransferFailed(**msg_args)
+
+ common.ftp_put_command(self.connection, file_path,
+ remote_path).AndRaise(exp_exception)
+
+ self.powervm_adapter.run_vios_command_as_root(exp_cmd).AndReturn([])
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.PowerVMFTPTransferFailed,
+ self.powervm_adapter._copy_image_file,
+ file_path, remote_path)
+
def test_copy_image_file_wrong_checksum(self):
file_path = os.tempnam('/tmp', 'image')
remote_path = '/mnt/openstack/images'
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index e2ce742740..d0c41bf756 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -105,7 +105,8 @@ class _FakeDriverBackendTestCase(object):
def fake_make_drive(_self, _path):
pass
- def fake_get_instance_disk_info(_self, instance, xml=None):
+ def fake_get_instance_disk_info(_self, instance, xml=None,
+ block_device_info=None):
return '[]'
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 789ea8565a..b8039f2d22 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -71,7 +71,7 @@ class VMwareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- self.conn = driver.VMwareESXDriver(None, False)
+ self.conn = driver.VMwareVCDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = utils.get_test_network_info(legacy_model=False)
@@ -91,6 +91,7 @@ class VMwareAPIVMTestCase(test.TestCase):
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
+ 'uuid': "fake-uuid",
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': "1",
@@ -120,7 +121,8 @@ class VMwareAPIVMTestCase(test.TestCase):
self.assertEquals(len(instances), 1)
# Get Nova record for VM
- vm_info = self.conn.get_info({'name': 1})
+ vm_info = self.conn.get_info({'uuid': 'fake-uuid',
+ 'name': 1})
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
@@ -177,7 +179,7 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_spawn(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
@@ -191,11 +193,11 @@ class VMwareAPIVMTestCase(test.TestCase):
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
func_call_matcher.call)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.assertIsNone(func_call_matcher.match())
@@ -207,12 +209,23 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_reboot(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid'})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_with_uuid(self):
+ """Test fall back to use name when can't find by uuid."""
+ self._create_vm()
+ info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid'})
+ self._check_vm_info(info, power_state.RUNNING)
+ reboot_type = "SOFT"
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
+ info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
@@ -223,10 +236,10 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_reboot_not_poweredon(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
@@ -234,10 +247,10 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_suspend(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': "fake-uuid"})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
@@ -247,13 +260,13 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_resume(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
@@ -263,20 +276,20 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_resume_not_suspended(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_power_on_non_existent(self):
@@ -286,10 +299,10 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_power_off(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SHUTDOWN)
def test_power_off_non_existent(self):
@@ -300,19 +313,19 @@ class VMwareAPIVMTestCase(test.TestCase):
def test_power_off_suspended(self):
self._create_vm()
self.conn.suspend(self.instance)
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstancePowerOffFailure,
self.conn.power_off, self.instance)
def test_get_info(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_destroy(self):
self._create_vm()
- info = self.conn.get_info({'name': 1})
+ info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
@@ -366,14 +379,10 @@ class VMwareAPIVMTestCase(test.TestCase):
self.instance)
def test_get_vnc_console(self):
- vm_ref = fake_vm_ref()
self._create_instance_in_the_db()
self._create_vm()
- self.mox.StubOutWithMock(self.conn._vmops, '_get_vnc_port')
- self.conn._vmops._get_vnc_port(mox.IgnoreArg()).AndReturn(5910)
- self.mox.ReplayAll()
vnc_dict = self.conn.get_vnc_console(self.instance)
- self.assertEquals(vnc_dict['host'], "test_url")
+ self.assertEquals(vnc_dict['host'], "ha-host")
self.assertEquals(vnc_dict['port'], 5910)
def test_host_ip_addr(self):
diff --git a/nova/tests/test_vmwareapi_vm_util.py b/nova/tests/test_vmwareapi_vm_util.py
index eda2c25f92..7cb699aad0 100644
--- a/nova/tests/test_vmwareapi_vm_util.py
+++ b/nova/tests/test_vmwareapi_vm_util.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
+
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
@@ -15,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
@@ -32,17 +35,19 @@ class fake_session(object):
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
+ fake.reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
+ fake.reset()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
- self.assertEquals(result[2], 1024 * 1024 * 1024)
- self.assertEquals(result[3], 1024 * 1024 * 500)
+ self.assertEquals(result[2], 1024 * 1024 * 1024 * 1024)
+ self.assertEquals(result[3], 1024 * 1024 * 500 * 1024)
def test_get_datastore_ref_and_name_without_datastore(self):
@@ -53,3 +58,95 @@ class VMwareVMUtilTestCase(test.TestCase):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
+
+ def test_get_host_ref_from_id(self):
+
+ fake_host_sys = fake.HostSystem(
+ fake.ManagedObjectReference("HostSystem", "host-123"))
+
+ fake_host_id = fake_host_sys.obj.value
+ fake_host_name = "ha-host"
+
+ ref = vm_util.get_host_ref_from_id(
+ fake_session([fake_host_sys]), fake_host_id, ['name'])
+
+ self.assertIsInstance(ref, fake.HostSystem)
+ self.assertEqual(fake_host_id, ref.obj.value)
+
+ host_name = vm_util.get_host_name_from_host_ref(ref)
+
+ self.assertEquals(fake_host_name, host_name)
+
+ def test_get_host_name_for_vm(self):
+
+ fake_vm = fake.ManagedObject(
+ "VirtualMachine", fake.ManagedObjectReference(
+ "vm-123", "VirtualMachine"))
+ fake_vm.propSet.append(
+ fake.Property('name', 'vm-123'))
+
+ vm_ref = vm_util.get_vm_ref_from_name(
+ fake_session([fake_vm]), 'vm-123')
+
+ self.assertIsNotNone(vm_ref)
+
+ fake_results = [
+ fake.ObjectContent(
+ None, [
+ fake.Property('runtime.host',
+ fake.ManagedObjectReference(
+ 'host-123', 'HostSystem'))
+ ])]
+
+ host_id = vm_util.get_host_id_from_vm_ref(
+ fake_session(fake_results), vm_ref)
+
+ self.assertEqual('host-123', host_id)
+
+ def test_property_from_property_set(self):
+
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+ MoRef = collections.namedtuple('Val', ['value'])
+
+ results_good = [
+ ObjectContent(propSet=[
+ DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
+ ObjectContent(propSet=[
+ DynamicProperty(name='foo', val=MoRef(value='bar1')),
+ DynamicProperty(
+ name='runtime.host', val=MoRef(value='host-123')),
+ DynamicProperty(name='foo', val=MoRef(value='bar2')),
+ ]),
+ ObjectContent(propSet=[
+ DynamicProperty(
+ name='something', val=MoRef(value='thing'))]), ]
+
+ results_bad = [
+ ObjectContent(propSet=[
+ DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
+ ObjectContent(propSet=[
+ DynamicProperty(name='foo', val='bar1'),
+ DynamicProperty(name='foo', val='bar2'), ]),
+ ObjectContent(propSet=[
+ DynamicProperty(
+ name='something', val=MoRef(value='thing'))]), ]
+
+ prop = vm_util.property_from_property_set(
+ 'runtime.host', results_good)
+ self.assertIsNotNone(prop)
+ value = prop.val.value
+ self.assertEqual('host-123', value)
+
+ prop2 = vm_util.property_from_property_set(
+ 'runtime.host', results_bad)
+ self.assertIsNone(prop2)
+
+ prop3 = vm_util.property_from_property_set('foo', results_good)
+ self.assertIsNotNone(prop3)
+ val3 = prop3.val.value
+ self.assertEqual('bar1', val3)
+
+ prop4 = vm_util.property_from_property_set('foo', results_bad)
+ self.assertIsNotNone(prop4)
+ self.assertEqual('bar1', prop4.val)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 14af7e238e..8141e7527d 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -2074,8 +2074,9 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
network_model = fake_network.fake_get_instance_nw_info(self.stubs,
1, spectacular=True)
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
- lambda *a, **kw: network_model)
+ from nova.compute import utils as compute_utils
+ self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+ lambda instance: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index 54e3cf43bd..babdb4fb75 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -20,7 +20,6 @@ Stubouts, mocks and fixtures for the test suite
"""
import time
-import uuid
from nova.compute import task_states
from nova.compute import vm_states
@@ -63,7 +62,7 @@ def stub_out_db_instance_api(stubs):
base_options = {
'name': values['name'],
'id': values['id'],
- 'uuid': uuid.uuid4(),
+ 'uuid': values['uuid'],
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 86c94e2297..892fe6aaeb 100755
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -936,4 +936,4 @@ def load_compute_driver(virtapi, compute_driver=None):
def compute_driver_matches(match):
- return CONF.compute_driver.endswith(match)
+ return CONF.compute_driver and CONF.compute_driver.endswith(match)
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index b61b57cfbb..e577b47263 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -19,9 +19,8 @@
from oslo.config import cfg
-from nova import conductor
+from nova.compute import utils as compute_utils
from nova import context
-from nova import network
from nova.network import linux_net
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
@@ -409,18 +408,9 @@ class IptablesFirewallDriver(FirewallDriver):
fw_rules += [' '.join(args)]
else:
if rule['grantee_group']:
- # FIXME(jkoelker) This needs to be ported up into
- # the compute manager which already
- # has access to a nw_api handle,
- # and should be the only one making
- # making rpc calls.
- nw_api = network.API()
- capi = conductor.API()
for instance in rule['grantee_group']['instances']:
- nw_info = nw_api.get_instance_nw_info(
- ctxt,
- instance,
- conductor_api=capi)
+ nw_info = compute_utils.get_nw_info_for_instance(
+ instance)
ips = [ip['address']
for ip in nw_info.fixed_ips()
diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py
index d28ce75a5e..843796e000 100644
--- a/nova/virt/hyperv/hostutils.py
+++ b/nova/virt/hyperv/hostutils.py
@@ -71,6 +71,10 @@ class HostUtils(object):
% drive)[0]
return (long(logical_disk.Size), long(logical_disk.FreeSpace))
+ def check_min_windows_version(self, major, minor, build=0):
+ version_str = self.get_windows_version()
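+        # A sketch of the comparison, assuming a Windows Server 2012
+        # host where get_windows_version() returns "6.2.9200":
+        # map(int, ...) yields [6, 2, 9200], and the element-wise
+        # list comparison [6, 2, 9200] >= [6, 2, 0] is True.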
+ return map(int, version_str.split('.')) >= [major, minor, build]
+
def get_windows_version(self):
return self._conn_cimv2.Win32_OperatingSystem()[0].Version
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index adca7b8f3f..cbf2302454 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -18,10 +18,13 @@
"""
Management class for live migration VM operations.
"""
+import functools
+
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
+from nova.virt.hyperv import hostutils
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import pathutils
@@ -32,13 +35,30 @@ CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
+def check_os_version_requirement(function):
+ @functools.wraps(function)
+ def wrapper(self, *args, **kwds):
+ if not self._livemigrutils:
+ raise NotImplementedError(_('Live migration is supported '
+ 'starting with Hyper-V Server '
+ '2012'))
+ return function(self, *args, **kwds)
+ return wrapper
+
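+# A usage sketch: every public method below that requires Hyper-V
+# Server 2012 is wrapped, so calling it on an older host raises
+# NotImplementedError before the method body runs, e.g.:
+#
+#     @check_os_version_requirement
+#     def live_migration(self, ...):
+#         ...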
+
class LiveMigrationOps(object):
def __init__(self):
+ # Live migration is supported starting from Hyper-V Server 2012
+ if hostutils.HostUtils().check_min_windows_version(6, 2):
+ self._livemigrutils = livemigrationutils.LiveMigrationUtils()
+ else:
+ self._livemigrutils = None
+
self._pathutils = pathutils.PathUtils()
- self._livemigrutils = livemigrationutils.LiveMigrationUtils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
+ @check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
@@ -60,6 +80,7 @@ class LiveMigrationOps(object):
instance_name)
post_method(context, instance_ref, dest, block_migration)
+ @check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug(_("pre_live_migration called"), instance=instance)
@@ -73,11 +94,13 @@ class LiveMigrationOps(object):
self._volumeops.login_storage_targets(block_device_info)
+ @check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug(_("post_live_migration_at_destination called"),
instance=instance_ref)
+ @check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
@@ -85,10 +108,12 @@ class LiveMigrationOps(object):
LOG.debug(_("check_can_live_migrate_destination called"), instance_ref)
return {}
+ @check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug(_("check_can_live_migrate_destination_cleanup called"))
+ @check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug(_("check_can_live_migrate_source called"), instance_ref)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 74953435a7..6e33f1967c 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -69,7 +69,7 @@ class VolumeOps(object):
def _get_volume_utils(self):
if(not CONF.hyperv.force_volumeutils_v1 and
- self._hostutils.get_windows_version() >= 6.2):
+ self._hostutils.check_min_windows_version(6, 2)):
return volumeutilsv2.VolumeUtilsV2()
else:
return volumeutils.VolumeUtils()
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index fd4f5c4dbc..8eae45248f 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -714,6 +714,11 @@ class LibvirtDriver(driver.ComputeDriver):
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
+ elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
+ LOG.warn(_("Cannot destroy instance, operation time out"),
+ instance=instance)
+ reason = _("operation time out")
+ raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
@@ -908,15 +913,12 @@ class LibvirtDriver(driver.ComputeDriver):
'world wide port names'),
instance=instance)
- if not self._initiator and not self._fc_wwnns and not self._fc_wwpns:
- msg = _("No Volume Connector found.")
- LOG.error(msg)
- raise exception.NovaException(msg)
-
connector = {'ip': CONF.my_ip,
- 'initiator': self._initiator,
'host': CONF.host}
+ if self._initiator:
+ connector['initiator'] = self._initiator
+
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
@@ -1367,7 +1369,8 @@ class LibvirtDriver(driver.ComputeDriver):
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
- disk_info_json = self.get_instance_disk_info(instance['name'], xml)
+ disk_info_json = self.get_instance_disk_info(instance['name'], xml,
+ block_device_info)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
@@ -3319,7 +3322,8 @@ class LibvirtDriver(driver.ComputeDriver):
dom = self._lookup_by_name(instance_ref["name"])
self._conn.defineXML(dom.XMLDesc(0))
- def get_instance_disk_info(self, instance_name, xml=None):
+ def get_instance_disk_info(self, instance_name, xml=None,
+ block_device_info=None):
"""Preparation block migration.
:params instance_ref:
@@ -3349,15 +3353,27 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
+ # NOTE (rmk): When block_device_info is provided, we will use it to
+ # filter out devices which are actually volumes.
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ volume_devices = set()
+ for vol in block_device_mapping:
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ volume_devices.add(disk_dev)
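+        # e.g. a mapping whose mount_device is '/dev/vdb' contributes
+        # 'vdb' here, which is then matched against the <target dev=...>
+        # element of each <disk> parsed from the domain XML below.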
+
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
+ target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
+ target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
LOG.debug(_('skipping %(path)s since it looks like volume') %
@@ -3370,6 +3386,11 @@ class LibvirtDriver(driver.ComputeDriver):
locals())
continue
+ if target in volume_devices:
+ LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
+ 'volume'), {'path': path, 'target': target})
+ continue
+
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
@@ -3439,22 +3460,46 @@ class LibvirtDriver(driver.ComputeDriver):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
- def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
+ def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
+ shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
- utils.execute('ssh', dest, 'rm', '-rf', inst_base)
+ if not shared_storage:
+ utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
+ def _is_storage_shared_with(self, dest, inst_base):
+ # NOTE (rmk): There are two methods of determining whether we are
+ # on the same filesystem: the source and dest IP are the
+ # same, or we create a file on the dest system via SSH
+ # and check whether the source system can also see it.
+ shared_storage = (dest == self.get_host_ip_addr())
+ if not shared_storage:
+ tmp_file = uuid.uuid4().hex + '.tmp'
+ tmp_path = os.path.join(inst_base, tmp_file)
+
+ try:
+ utils.execute('ssh', dest, 'touch', tmp_path)
+ if os.path.exists(tmp_path):
+ shared_storage = True
+ os.unlink(tmp_path)
+ else:
+ utils.execute('ssh', dest, 'rm', tmp_path)
+ except Exception:
+ pass
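+                # NOTE: any failure of the ssh probe leaves
+                # shared_storage False, i.e. we fall back to the
+                # safer assumption that storage is not shared.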
+ return shared_storage
+
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
- disk_info_text = self.get_instance_disk_info(instance['name'])
+ disk_info_text = self.get_instance_disk_info(instance['name'],
+ block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
self.power_off(instance)
@@ -3471,12 +3516,13 @@ class LibvirtDriver(driver.ComputeDriver):
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
- same_host = (dest == self.get_host_ip_addr())
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
+
+ shared_storage = self._is_storage_shared_with(dest, inst_base)
try:
utils.execute('mv', inst_base, inst_base_resize)
- if same_host:
+ if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
else:
@@ -3492,7 +3538,7 @@ class LibvirtDriver(driver.ComputeDriver):
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
- if same_host:
+ if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
@@ -3503,7 +3549,8 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
- inst_base_resize)
+ inst_base_resize,
+ shared_storage)
return disk_info_text
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 4d7a06648e..6972243d70 100755
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -52,7 +52,11 @@ def get_iscsi_initiator():
"""Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
- contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
+ try:
+ contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
+ except exception.FileNotFound:
+ return None
+
for l in contents.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index 0f5580197a..2f984d0f5c 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -423,9 +423,14 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
# If the image does not exist already
if not output:
- # Copy file to IVM
- common.ftp_put_command(self.connection_data, source_path,
- remote_path)
+ try:
+ # Copy file to IVM
+ common.ftp_put_command(self.connection_data, source_path,
+ remote_path)
+ except exception.PowerVMFTPTransferFailed:
+ with excutils.save_and_reraise_exception():
+ cmd = "/usr/bin/rm -f %s" % final_path
+ self.run_vios_command_as_root(cmd)
# Verify image file checksums match
output = self._md5sum_remote_file(final_path)
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 9b9cb301b7..014ca0f723 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -126,6 +126,12 @@ class Failure(Exception):
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
+ # VMwareAPI has both ESXi and vCenter API sets.
+    # The ESXi API is a proper subset of the vCenter API:
+    # nearly all valid ESXi calls are also valid vCenter
+    # calls, with some small edge-case exceptions regarding
+    # VNC, CIM, user management and SSO.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
@@ -335,6 +341,14 @@ class VMwareESXDriver(driver.ComputeDriver):
class VMwareVCDriver(VMwareESXDriver):
"""The ESX host connection object."""
+    # The vCenter driver adds several VMware vSphere capabilities,
+    # including APIs that act on hosts or on groups of hosts in
+    # clusters or in non-cluster logical groupings.
+    #
+    # vCenter is not a hypervisor itself; it manages multiple
+    # hypervisor host machines and their guests. This fact can
+    # subtly alter how vSphere and OpenStack interoperate.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
if not self._cluster_name:
@@ -391,6 +405,14 @@ class VMwareVCDriver(VMwareESXDriver):
post_method, recover_method,
block_migration)
+ def get_vnc_console(self, instance):
+ """Return link to instance's VNC console using vCenter logic."""
+ # In this situation, ESXi and vCenter require different
+ # API logic to create a valid VNC console connection object.
+        # Specifically, vCenter does not actually run the VNC service
+        # itself; you must talk to the ESX host underneath vCenter.
+ return self._vmops.get_vnc_console_vcenter(instance)
+
class VMwareAPISession(object):
"""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index e088d2302a..4f939a0e02 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
@@ -20,6 +21,7 @@
A fake VMware VI API implementation.
"""
+import collections
import pprint
import uuid
@@ -81,22 +83,72 @@ def _get_objects(obj_type):
return lst_objs
-class Prop(object):
+class Property(object):
"""Property Object base class."""
- def __init__(self):
- self.name = None
- self.val = None
+ def __init__(self, name=None, val=None):
+ self.name = name
+ self.val = val
+
+
+class ManagedObjectReference(object):
+ """A managed object reference is a remote identifier."""
+
+ def __init__(self, value="object-123", _type="ManagedObject"):
+ super(ManagedObjectReference, self)
+ # Managed Object Reference value attributes
+ # typically have values like vm-123 or
+ # host-232 and not UUID.
+ self.value = value
+        # Managed Object Reference _type attributes hold the
+        # name of the vCenter object type that the value
+        # attribute identifies.
+ self._type = _type
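+
+        # e.g. ManagedObjectReference("vm-123", "VirtualMachine")
+        # stands in for the reference vCenter would return for a
+        # virtual machine (value "vm-123", type "VirtualMachine").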
+
+
+class ObjectContent(object):
+ """ObjectContent array holds dynamic properties."""
+
+    # This class is a *fake* of a class sent back to us by
+    # SOAP. Its attribute names are dictated by the API we
+    # are *faking* here.
+ def __init__(self, obj_ref, prop_list=None, missing_list=None):
+ self.obj = obj_ref
+
+ if not isinstance(prop_list, collections.Iterable):
+ prop_list = []
+
+ if not isinstance(missing_list, collections.Iterable):
+ missing_list = []
+
+        # propSet is the name your Python code must use,
+        # since it is the name the real API uses.
+        self.propSet = prop_list
+
+        # missingSet likewise mirrors the attribute name
+        # used by the API we are talking to.
+ self.missingSet = missing_list
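+
+    # A minimal usage sketch with the fakes defined in this module:
+    #   ref = ManagedObjectReference("vm-123", "VirtualMachine")
+    #   oc = ObjectContent(ref, prop_list=[Property('name', 'vm-123')])
+    #   oc.propSet[0].val  ->  'vm-123'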
class ManagedObject(object):
- """Managed Data Object base class."""
+ """Managed Object base class."""
- def __init__(self, name="ManagedObject", obj_ref=None):
+ def __init__(self, name="ManagedObject", obj_ref=None, value=None):
"""Sets the obj property which acts as a reference to the object."""
super(ManagedObject, self).__setattr__('objName', name)
+
+ # A managed object is a local representation of a
+ # remote object that you can reference using the
+ # object reference.
if obj_ref is None:
- obj_ref = str(uuid.uuid4())
+ if value is None:
+ value = 'obj-123'
+ obj_ref = ManagedObjectReference(value, name)
+
+        # we use object.__setattr__ here because the default
+        # setter is overridden below for this class.
object.__setattr__(self, 'obj', obj_ref)
object.__setattr__(self, 'propSet', [])
@@ -116,16 +168,20 @@ class ManagedObject(object):
return self.__getattr__(attr)
def __setattr__(self, attr, val):
+        # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
- elem = Prop()
+ elem = Property()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
+        # TODO(hartsocks): remove this.
+        # In a real ManagedObject you have to iterate the propSet,
+        # and the propSet is a *set*, not a list.
for elem in self.propSet:
if elem.name == attr:
return elem.val
@@ -185,7 +241,7 @@ class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
- super(VirtualMachine, self).__init__("VirtualMachine")
+ super(VirtualMachine, self).__init__("VirtualMachine", value='vm-10')
self.set("name", kwargs.get("name"))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
@@ -203,6 +259,8 @@ class VirtualMachine(ManagedObject):
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("config.hardware.device", kwargs.get("virtual_device", None))
self.set("config.extraConfig", kwargs.get("extra_config", None))
+ self.set('runtime.host',
+ ManagedObjectReference(value='host-123', _type="HostSystem"))
self.device = kwargs.get("virtual_device")
def reconfig(self, factory, val):
@@ -256,8 +314,8 @@ class Datastore(ManagedObject):
super(Datastore, self).__init__("Datastore")
self.set("summary.type", "VMFS")
self.set("summary.name", "fake-ds")
- self.set("summary.capacity", 1024 * 1024 * 1024)
- self.set("summary.freeSpace", 500 * 1024 * 1024)
+ self.set("summary.capacity", 1024 * 1024 * 1024 * 1024)
+ self.set("summary.freeSpace", 500 * 1024 * 1024 * 1024)
class HostNetworkSystem(ManagedObject):
@@ -279,8 +337,8 @@ class HostNetworkSystem(ManagedObject):
class HostSystem(ManagedObject):
"""Host System class."""
- def __init__(self):
- super(HostSystem, self).__init__("HostSystem")
+ def __init__(self, obj_ref=None, value='host-123'):
+ super(HostSystem, self).__init__("HostSystem", obj_ref, value)
self.set("name", "ha-host")
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
index 8c62c5ce9b..7abff5678b 100644
--- a/nova/virt/vmwareapi/host.py
+++ b/nova/virt/vmwareapi/host.py
@@ -126,8 +126,8 @@ class HostState(object):
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
- data["disk_total"] = ds[2] / (1024 * 1024)
- data["disk_available"] = ds[3] / (1024 * 1024)
+ data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
+ data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
data["host_memory_free"] = data["host_memory_total"] - \
@@ -193,8 +193,8 @@ class VCState(object):
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
- data["disk_total"] = ds[2] / (1024 * 1024)
- data["disk_available"] = ds[3] / (1024 * 1024)
+ data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
+ data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
data["host_memory_free"] = data["host_memory_total"] -\
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index b04f4148b5..c6f660fd65 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,6 +20,7 @@ The VMware API VM utility module to build SOAP object specs.
"""
import copy
+
from nova import exception
from nova.virt.vmwareapi import vim_util
@@ -47,7 +48,7 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
- config_spec.name = instance['name']
+ config_spec.name = instance['uuid']
config_spec.guestId = os_type
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
@@ -503,6 +504,130 @@ def get_vm_ref_from_name(session, vm_name):
return None
+def get_vm_ref_from_uuid(session, instance_uuid):
+ """Get reference to the VM with the uuid specified."""
+ vms = session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == instance_uuid:
+ return vm.obj
+
+
+def get_vm_ref(session, instance):
+ """Get reference to the VM through uuid or vm name."""
+ vm_ref = get_vm_ref_from_uuid(session, instance['uuid'])
+ if not vm_ref:
+ vm_ref = get_vm_ref_from_name(session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ return vm_ref
+
+
+def get_host_ref_from_id(session, host_id, property_list=None):
+ """Get a host reference object for a host_id string."""
+
+ if property_list is None:
+ property_list = ['name']
+
+ host_refs = session._call_method(
+ vim_util, "get_objects",
+ "HostSystem", property_list)
+
+ for ref in host_refs:
+ if ref.obj.value == host_id:
+ return ref
+
+
+def get_host_id_from_vm_ref(session, vm_ref):
+ """
+    Find the managed object ID of the host running a VM.
+    Since vMotion can move a VM to another host at any time,
+    do not cache this value for long and be prepared for it
+    to change.
+
+ :param session: a vSphere API connection
+ :param vm_ref: a reference object to the running VM
+ :return: the host_id running the virtual machine
+ """
+
+ # to prevent typographical errors below
+ property_name = 'runtime.host'
+
+    # A property collector in the VMware vSphere Management API
+    # is a set of local representations of remote values.
+    # property_set here is a local representation of the
+    # properties we are querying for.
+ property_set = session._call_method(
+ vim_util, "get_object_properties",
+ None, vm_ref, vm_ref._type, [property_name])
+
+ prop = property_from_property_set(
+ property_name, property_set)
+
+ if prop is not None:
+ prop = prop.val.value
+ else:
+ # reaching here represents an impossible state
+ raise RuntimeError(
+ "Virtual Machine %s exists without a runtime.host!"
+ % (vm_ref))
+
+ return prop
+
+
+def property_from_property_set(property_name, property_set):
+    """
+    Use this method to filter property collector results.
+
+    Because network traffic is expensive, multiple
+    VMwareAPI calls will sometimes pile up properties
+    to be collected in a single query, so results may
+    contain many different values for multiple purposes.
+
+    This helper filters the result list down to the first
+    result carrying the named property and returns that
+    property. Its value could be a ManagedObjectReference
+    ID or a complex value.
+
+    :param property_name: name of property you want
+    :param property_set: all results from query
+    :return: the matching property, or None if not found.
+    """
+
+ for prop in property_set:
+ p = _property_from_propSet(prop.propSet, property_name)
+ if p is not None:
+ return p
+
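+# For example, given results whose propSets contain several
+# DynamicProperty entries named 'foo', the first matching property
+# wins; the unit test test_property_from_property_set exercises
+# this first-match behaviour.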
+
+def _property_from_propSet(propSet, name='name'):
+ for p in propSet:
+ if p.name == name:
+ return p
+
+
+def get_host_ref_for_vm(session, instance, props):
+ """Get the ESXi host running a VM by its name."""
+
+ vm_ref = get_vm_ref(session, instance)
+ host_id = get_host_id_from_vm_ref(session, vm_ref)
+ return get_host_ref_from_id(session, host_id, props)
+
+
+def get_host_name_for_vm(session, instance):
+ """Get the ESXi host running a VM by its name."""
+ host_ref = get_host_ref_for_vm(session, instance, ['name'])
+ return get_host_name_from_host_ref(host_ref)
+
+
+def get_host_name_from_host_ref(host_ref):
+ p = _property_from_propSet(host_ref.propSet)
+ if p is not None:
+ return p.val
+
+
def get_cluster_ref_from_name(session, cluster_name):
"""Get reference to the cluster with the name specified."""
cls = session._call_method(vim_util, "get_objects",
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 3d37a7a993..1c791e373b 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -133,9 +133,6 @@ class VMwareVMOps(object):
4. Attach the disk to the VM by reconfiguring the same.
5. Power on the VM.
"""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref:
- raise exception.InstanceExists(name=instance['name'])
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -213,7 +210,7 @@ class VMwareVMOps(object):
LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
@@ -356,7 +353,7 @@ class VMwareVMOps(object):
upload_folder = self._instance_path_base
upload_name = instance['image_ref']
else:
- upload_folder = instance['name']
+ upload_folder = instance['uuid']
upload_name = instance['name']
# The vmdk meta-data file
@@ -413,7 +410,7 @@ class VMwareVMOps(object):
root_disk = driver.block_device_info_get_mapping(
block_device_info)[0]
connection_info = root_disk['connection_info']
- self._volumeops.attach_volume(connection_info, instance['name'],
+ self._volumeops.attach_volume(connection_info, instance['uuid'],
self._default_root_device)
def _power_on_vm():
@@ -442,10 +439,7 @@ class VMwareVMOps(object):
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -475,7 +469,7 @@ class VMwareVMOps(object):
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
- name="%s-snapshot" % instance['name'],
+ name="%s-snapshot" % instance['uuid'],
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
@@ -590,10 +584,7 @@ class VMwareVMOps(object):
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
self.plug_vifs(instance, network_info)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
@@ -640,14 +631,8 @@ class VMwareVMOps(object):
2. Destroy the VM.
"""
try:
- vm_ref = vm_util.get_vm_ref_from_name(self._session,
- instance['name'])
- if vm_ref is None:
- LOG.debug(_("instance not present"), instance=instance)
- return
-
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
self.power_off(instance)
-
try:
LOG.debug(_("Destroying the VM"), instance=instance)
destroy_task = self._session._call_method(
@@ -672,11 +657,7 @@ class VMwareVMOps(object):
3. Delete the contents of the folder holding the VM related data.
"""
try:
- vm_ref = vm_util.get_vm_ref_from_name(self._session,
- instance['name'])
- if vm_ref is None:
- LOG.debug(_("instance not present"), instance=instance)
- return
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["config.files.vmPathName", "runtime.powerState"]
props = self._session._call_method(vim_util,
"get_object_properties",
@@ -755,10 +736,7 @@ class VMwareVMOps(object):
def suspend(self, instance):
"""Suspend the specified instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
@@ -779,10 +757,7 @@ class VMwareVMOps(object):
def resume(self, instance):
"""Resume the specified instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
@@ -804,9 +779,7 @@ class VMwareVMOps(object):
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
self.power_off(instance)
instance['name'] = instance['name'] + self._rescue_suffix
@@ -820,7 +793,10 @@ class VMwareVMOps(object):
= vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
# Figure out the correct unit number
unit_number = unit_number + 1
- rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ rescue_vm_ref = vm_util.get_vm_ref_from_uuid(self._session,
+ instance['uuid'])
+ if rescue_vm_ref is None:
+ rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance['name'])
self._volumeops.attach_disk_to_vm(
rescue_vm_ref, instance,
@@ -838,9 +814,7 @@ class VMwareVMOps(object):
def power_off(self, instance):
"""Power off the specified instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -863,9 +837,7 @@ class VMwareVMOps(object):
def _power_on(self, instance):
"""Power on the specified instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -916,9 +888,7 @@ class VMwareVMOps(object):
step=0,
total_steps=RESIZE_TOTAL_STEPS)
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['name'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
@@ -969,7 +939,9 @@ class VMwareVMOps(object):
"""Confirms a resize, destroying the source VM."""
instance_name = self._get_orig_vm_name_label(instance)
# Destroy the original VM.
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ vm_ref = vm_util.get_vm_ref_from_uuid(self._session, instance['uuid'])
+ if vm_ref is None:
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
return
@@ -1002,7 +974,7 @@ class VMwareVMOps(object):
instance=instance)
rename_task = self._session._call_method(
self._session._get_vim(),
- "Rename_Task", vm_ref, newName=instance['name'])
+ "Rename_Task", vm_ref, newName=instance['uuid'])
self._session._wait_for_task(instance['uuid'], rename_task)
LOG.debug(_("Renamed the VM from %s") % name_label,
instance=instance)
@@ -1023,9 +995,8 @@ class VMwareVMOps(object):
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_ref.name)
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance_ref.name)
+ vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
+
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
@@ -1061,9 +1032,7 @@ class VMwareVMOps(object):
def get_info(self, instance):
"""Return data about the VM instance."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['name'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -1097,9 +1066,8 @@ class VMwareVMOps(object):
def get_console_output(self, instance):
"""Return snapshot of console."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -1118,14 +1086,32 @@ class VMwareVMOps(object):
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
return {'host': CONF.vmwareapi_host_ip,
'port': self._get_vnc_port(vm_ref),
'internal_access_path': None}
+ def get_vnc_console_vcenter(self, instance):
+ """Return connection info for a vnc console using vCenter logic."""
+
+ # vCenter does not run virtual machines and does not run
+ # a VNC proxy. Instead, you need to tell OpenStack to talk
+ # directly to the ESX host running the VM you are attempting
+ # to connect to via VNC.
+
+ vnc_console = self.get_vnc_console(instance)
+ host_name = vm_util.get_host_name_for_vm(
+ self._session,
+ instance)
+ vnc_console['host'] = host_name
+
+        # NOTE: a VM can move between hosts in some situations;
+        # log the current host to help admins debug.
+        LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"),
+                  {'uuid': instance['uuid'], 'host_name': host_name})
+
+ return vnc_console
+
@staticmethod
def _get_vnc_port(vm_ref):
"""Return VNC port for an VM."""
@@ -1166,9 +1152,7 @@ class VMwareVMOps(object):
Set the machine id of the VM for guest tools to pick up and reconfigure
the network interfaces.
"""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
@@ -1187,9 +1171,7 @@ class VMwareVMOps(object):
"""
Set the vnc configuration of the VM.
"""
- vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port, password)