-rw-r--r--  doc/source/admin/remote-console-access.rst | 5
-rw-r--r--  nova/compute/api.py | 69
-rw-r--r--  nova/compute/manager.py | 41
-rw-r--r--  nova/compute/resource_tracker.py | 21
-rw-r--r--  nova/conductor/manager.py | 21
-rw-r--r--  nova/conductor/tasks/live_migrate.py | 4
-rw-r--r--  nova/locale/cs/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/de/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/es/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/fr/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/it/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/ja/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/ko_KR/LC_MESSAGES/nova.po | 28
-rw-r--r--  nova/locale/pt_BR/LC_MESSAGES/nova.po | 28
-rw-r--r--  nova/locale/ru/LC_MESSAGES/nova.po | 26
-rw-r--r--  nova/locale/tr_TR/LC_MESSAGES/nova.po | 28
-rw-r--r--  nova/locale/zh_CN/LC_MESSAGES/nova.po | 28
-rw-r--r--  nova/locale/zh_TW/LC_MESSAGES/nova.po | 28
-rw-r--r--  nova/objects/instance.py | 7
-rw-r--r--  nova/privsep/libvirt.py | 39
-rw-r--r--  nova/scheduler/client/report.py | 61
-rw-r--r--  nova/scheduler/utils.py | 7
-rw-r--r--  nova/tests/fixtures.py | 33
-rw-r--r--  nova/tests/functional/api/openstack/placement/gabbits/traits.yaml | 82
-rw-r--r--  nova/tests/functional/api/openstack/placement/test_report_client.py | 26
-rw-r--r--  nova/tests/functional/regressions/test_bug_1404867.py | 107
-rw-r--r--  nova/tests/functional/regressions/test_bug_1670627.py | 1
-rw-r--r--  nova/tests/functional/regressions/test_bug_1689692.py | 1
-rw-r--r--  nova/tests/functional/test_servers.py | 28
-rw-r--r--  nova/tests/functional/wsgi/test_servers.py | 79
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_serversV21.py | 49
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 1
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py | 213
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 28
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 7
-rw-r--r--  nova/tests/unit/compute/test_shelve.py | 85
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 26
-rw-r--r--  nova/tests/unit/objects/test_instance.py | 16
-rw-r--r--  nova/tests/unit/scheduler/client/test_report.py | 174
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 39
-rw-r--r--  nova/tests/unit/virt/libvirt/test_vif.py | 14
-rw-r--r--  nova/virt/ironic/driver.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 6
-rw-r--r--  nova/virt/libvirt/vif.py | 15
44 files changed, 1103 insertions, 526 deletions
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 74a893d4bc..fa330172d9 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -334,6 +334,11 @@ service. As root, run the following command:
# apt-get install nova-novncproxy
+.. note::
+
+ If using non-US key mappings, then you need at least noVNC 1.0.0 for `a fix
+ <https://github.com/novnc/noVNC/commit/99feba6ba8fee5b3a2b2dc99dc25e9179c560d31>`_.
+
The service starts automatically on installation.
To restart the service, run:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 96828de87c..fe8c7af802 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1346,8 +1346,17 @@ class API(base.Base):
# compatibility can be removed after Ocata EOL.
self._check_attach(context, volume, instance)
bdm.volume_size = volume.get('size')
+
+ # NOTE(mnaser): If we end up reserving the volume, it will
+ # not have an attachment_id which is needed
+ # for cleanups. This can be removed once
+ # all calls to reserve_volume are gone.
+ if 'attachment_id' not in bdm:
+ bdm.attachment_id = None
except (exception.CinderConnectionFailed,
- exception.InvalidVolume):
+ exception.InvalidVolume,
+ exception.MultiattachNotSupportedOldMicroversion,
+ exception.MultiattachSupportNotYetAvailable):
raise
except exception.InvalidInput as exc:
raise exception.InvalidVolume(reason=exc.format_message())
@@ -1772,6 +1781,11 @@ class API(base.Base):
# instance is now in a cell and the delete needs to proceed
# normally.
return False
+
+ # We need to detach from any volumes so they aren't orphaned.
+ self._local_cleanup_bdm_volumes(
+ build_req.block_device_mappings, instance, context)
+
return True
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
@@ -1780,12 +1794,12 @@ class API(base.Base):
return
cell = None
- # If there is an instance.host (or the instance is shelved-offloaded),
- # the instance has been scheduled and sent to a cell/compute which
- # means it was pulled from the cell db.
+ # If there is an instance.host (or the instance is shelved-offloaded or
+ # in error state), the instance has been scheduled and sent to a
+ # cell/compute which means it was pulled from the cell db.
# Normal delete should be attempted.
- if not (instance.host or
- instance.vm_state == vm_states.SHELVED_OFFLOADED):
+ may_have_ports_or_volumes = self._may_have_ports_or_volumes(instance)
+ if not instance.host and not may_have_ports_or_volumes:
try:
if self._delete_while_booting(context, instance):
return
@@ -1864,9 +1878,7 @@ class API(base.Base):
# which will cause a cast to the child cell.
cb(context, instance, bdms)
return
- shelved_offloaded = (instance.vm_state
- == vm_states.SHELVED_OFFLOADED)
- if not instance.host and not shelved_offloaded:
+ if not instance.host and not may_have_ports_or_volumes:
try:
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance,
@@ -1879,7 +1891,12 @@ class API(base.Base):
{'state': instance.vm_state},
instance=instance)
return
- except exception.ObjectActionError:
+ except exception.ObjectActionError as ex:
+ # The instance's host likely changed under us as
+ # this instance could be building and has since been
+ # scheduled. Continue with attempts to delete it.
+ LOG.debug('Refreshing instance because: %s', ex,
+ instance=instance)
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
@@ -1887,7 +1904,8 @@ class API(base.Base):
is_local_delete = True
try:
- if not shelved_offloaded:
+ # instance.host must be set in order to look up the service.
+ if instance.host is not None:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
is_local_delete = not self.servicegroup_api.service_is_up(
@@ -1904,7 +1922,9 @@ class API(base.Base):
cb(context, instance, bdms)
except exception.ComputeHostNotFound:
- pass
+ LOG.debug('Compute host %s not found during service up check, '
+ 'going to local delete instance', instance.host,
+ instance=instance)
if is_local_delete:
# If instance is in shelved_offloaded state or compute node
@@ -1931,6 +1951,16 @@ class API(base.Base):
# NOTE(comstud): Race condition. Instance already gone.
pass
+ def _may_have_ports_or_volumes(self, instance):
+ # NOTE(melwitt): When an instance build fails in the compute manager,
+ # the instance host and node are set to None and the vm_state is set
+        # to ERROR. In that case, the instance with host = None has actually
+ # been scheduled and may have ports and/or volumes allocated on the
+ # compute node.
+ if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
+ return True
+ return False
+
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
@@ -1986,6 +2016,14 @@ class API(base.Base):
'the instance host %(instance_host)s.',
{'connector_host': connector.get('host'),
'instance_host': instance.host}, instance=instance)
+ if (instance.host is None and
+ self._may_have_ports_or_volumes(instance)):
+ LOG.debug('Allowing use of stashed volume connector with '
+ 'instance host None because instance with '
+ 'vm_state %(vm_state)s has been scheduled in '
+ 'the past.', {'vm_state': instance.vm_state},
+ instance=instance)
+ return connector
def _local_cleanup_bdm_volumes(self, bdms, instance, context):
"""The method deletes the bdm records and, if a bdm is a volume, call
@@ -2020,7 +2058,12 @@ class API(base.Base):
except Exception as exc:
LOG.warning("Ignoring volume cleanup failure due to %s",
exc, instance=instance)
- bdm.destroy()
+ # If we're cleaning up volumes from an instance that wasn't yet
+ # created in a cell, i.e. the user deleted the server while
+ # the BuildRequest still existed, then the BDM doesn't actually
+ # exist in the DB to destroy it.
+ if 'id' in bdm:
+ bdm.destroy()
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
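
The api.py change above boils down to a small predicate plus wiring it into _delete(). A minimal sketch of the idea, assuming the usual nova vm_states constants (the standalone function form and name are illustrative only, not part of the patch):

    from nova.compute import vm_states

    def may_have_ports_or_volumes(instance):
        # An instance that is shelved-offloaded, or whose build failed and
        # left it in ERROR with host=None, was scheduled at some point and
        # may still own ports and/or volumes that need cleanup on delete.
        return instance.vm_state in (vm_states.SHELVED_OFFLOADED,
                                     vm_states.ERROR)

When this returns True, _delete() no longer takes the "never scheduled" shortcut even though instance.host is None, so the port and volume cleanup paths still run.
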
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 722d8c5e50..8a27aa4709 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -697,7 +697,7 @@ class ComputeManager(manager.Manager):
cn_uuid = compute_nodes[migration.source_node]
if not scheduler_utils.remove_allocation_from_compute(
- instance, cn_uuid, self.reportclient):
+ context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of evacuated instance "
"on the source node %s",
cn_uuid, instance=instance)
@@ -2902,7 +2902,7 @@ class ComputeManager(manager.Manager):
# on the same host (not evacuate) uses the NopClaim which will
# not raise ComputeResourcesUnavailable.
rt.delete_allocation_for_evacuated_instance(
- instance, scheduled_node, node_type='destination')
+ context, instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
@@ -2916,7 +2916,8 @@ class ComputeManager(manager.Manager):
self._set_migration_status(migration, 'failed')
if recreate or scheduled_node is not None:
rt.delete_allocation_for_evacuated_instance(
- instance, scheduled_node, node_type='destination')
+ context, instance, scheduled_node,
+ node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise
else:
@@ -3832,7 +3833,7 @@ class ComputeManager(manager.Manager):
# any shared providers in the case of a confirm_resize operation and
# the source host and shared providers for a revert_resize operation.
if not scheduler_utils.remove_allocation_from_compute(
- instance, cn_uuid, self.reportclient, flavor):
+ context, instance, cn_uuid, self.reportclient, flavor):
LOG.error("Failed to save manipulated allocation",
instance=instance)
@@ -4213,6 +4214,15 @@ class ComputeManager(manager.Manager):
This is initiated from the destination host's ``prep_resize`` routine
and runs on the source host.
"""
+ try:
+ self._resize_instance(context, instance, image, migration,
+ instance_type, clean_shutdown)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._revert_allocation(context, instance, migration)
+
+ def _resize_instance(self, context, instance, image,
+ migration, instance_type, clean_shutdown):
with self._error_out_instance_on_exception(context, instance), \
errors_out_migration_ctxt(migration):
network_info = self.network_api.get_instance_nw_info(context,
@@ -4438,6 +4448,20 @@ class ComputeManager(manager.Manager):
new host machine.
"""
+ try:
+ self._finish_resize_helper(context, disk_info, image, instance,
+ migration)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._revert_allocation(context, instance, migration)
+
+ def _finish_resize_helper(self, context, disk_info, image, instance,
+ migration):
+ """Completes the migration process.
+
+ The caller must revert the instance's allocations if the migration
+ process failed.
+ """
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4914,8 +4938,11 @@ class ComputeManager(manager.Manager):
# instance claim will not remove the allocations.
rt.reportclient.delete_allocation_for_instance(context,
instance.uuid)
- # FIXME: Umm, shouldn't we be rolling back volume connections
- # and port bindings?
+ # FIXME: Umm, shouldn't we be rolling back port bindings too?
+ self._terminate_volume_connections(context, instance, bdms)
+ # The reverts_task_state decorator on unshelve_instance will
+ # eventually save these updates.
+ self._nil_out_instance_obj_host_and_node(instance)
if image:
instance.image_ref = shelved_image_ref
@@ -6294,7 +6321,7 @@ class ComputeManager(manager.Manager):
# attempt to clean up any doubled per-instance allocation
rt = self._get_resource_tracker()
rt.delete_allocation_for_migrated_instance(
- instance, source_node)
+ ctxt, instance, source_node)
def _consoles_enabled(self):
"""Returns whether a console is enable."""
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index ef2b26b44b..bedcc54605 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -894,7 +894,7 @@ class ResourceTracker(object):
# that the resource provider exists in the tree and has had its
# cached traits refreshed.
self.reportclient.set_traits_for_provider(
- compute_node.uuid, traits)
+ context, compute_node.uuid, traits)
if self.pci_tracker:
self.pci_tracker.save(context)
@@ -1316,27 +1316,30 @@ class ResourceTracker(object):
"host that might need to be removed: %s.",
instance_uuid, instance.host, instance.node, alloc)
- def delete_allocation_for_evacuated_instance(self, instance, node,
+ def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
self._delete_allocation_for_moved_instance(
- instance, node, 'evacuated', node_type)
+ context, instance, node, 'evacuated', node_type)
- def delete_allocation_for_migrated_instance(self, instance, node):
- self._delete_allocation_for_moved_instance(instance, node, 'migrated')
+ def delete_allocation_for_migrated_instance(self, context, instance, node):
+ self._delete_allocation_for_moved_instance(context, instance, node,
+ 'migrated')
def _delete_allocation_for_moved_instance(
- self, instance, node, move_type, node_type='source'):
+ self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not scheduler_utils.remove_allocation_from_compute(
- instance, cn_uuid, self.reportclient):
+ context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
- def delete_allocation_for_failed_resize(self, instance, node, flavor):
+ def delete_allocation_for_failed_resize(self, context, instance, node,
+ flavor):
"""Delete instance allocations for the node during a failed resize
+ :param context: The request context.
:param instance: The instance being resized/migrated.
:param node: The node provider on which the instance should have
allocations to remove. If this is a resize to the same host, then
@@ -1345,7 +1348,7 @@ class ResourceTracker(object):
"""
cn = self.compute_nodes[node]
if not scheduler_utils.remove_allocation_from_compute(
- instance, cn.uuid, self.reportclient, flavor):
+ context, instance, cn.uuid, self.reportclient, flavor):
if instance.instance_type_id == flavor.id:
operation = 'migration'
else:
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index f0327422fd..c0f99fb607 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -1034,7 +1034,8 @@ class ComputeTaskManager(base.Base):
return tags
def _bury_in_cell0(self, context, request_spec, exc,
- build_requests=None, instances=None):
+ build_requests=None, instances=None,
+ block_device_mapping=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
@@ -1070,6 +1071,14 @@ class ComputeTaskManager(base.Base):
for instance in instances_by_uuid.values():
with obj_target_cell(instance, cell0) as cctxt:
instance.create()
+
+ # NOTE(mnaser): In order to properly clean-up volumes after
+ # being buried in cell0, we need to store BDMs.
+ if block_device_mapping:
+ self._create_block_device_mapping(
+ cell0, instance.flavor, instance.uuid,
+ block_device_mapping)
+
# Use the context targeted to cell0 here since the instance is
# now in cell0.
self._set_vm_state_and_notify(
@@ -1108,7 +1117,8 @@ class ComputeTaskManager(base.Base):
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
- build_requests=build_requests)
+ build_requests=build_requests,
+ block_device_mapping=block_device_mapping)
return
host_mapping_cache = {}
@@ -1131,9 +1141,10 @@ class ComputeTaskManager(base.Base):
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host.service_host})
- self._bury_in_cell0(context, request_spec, exc,
- build_requests=[build_request],
- instances=[instance])
+ self._bury_in_cell0(
+ context, request_spec, exc,
+ build_requests=[build_request], instances=[instance],
+ block_device_mapping=block_device_mapping)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index 77400ea7a9..52383acd73 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -376,8 +376,8 @@ class LiveMigrationTask(base.TaskBase):
# allocated for the given (destination) node.
self.scheduler_client.reportclient.\
remove_provider_from_instance_allocation(
- self.instance.uuid, compute_node.uuid, self.instance.user_id,
- self.instance.project_id, resources)
+ self.context, self.instance.uuid, compute_node.uuid,
+ self.instance.user_id, self.instance.project_id, resources)
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
diff --git a/nova/locale/cs/LC_MESSAGES/nova.po b/nova/locale/cs/LC_MESSAGES/nova.po
index 9acc2d1fab..ea683b8a09 100644
--- a/nova/locale/cs/LC_MESSAGES/nova.po
+++ b/nova/locale/cs/LC_MESSAGES/nova.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -21,7 +21,7 @@ msgstr ""
"Language: cs\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Czech\n"
#, python-format
@@ -97,18 +97,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "Hypervizor %(type)s nepodporuje zařízení PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s musí být <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s musí být >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s musí být celé číslo"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"Hodnota %(worker_name)s ve %(workers)s je neplatná, musí být větší než 0"
@@ -1319,10 +1307,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "Hypervizor s ID !%s! nemohl být nalezen."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hypervizor: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "Přidělení IP adres přesahující kvótu v zásobě %s."
@@ -3675,9 +3659,6 @@ msgstr "fmt=%(fmt)s zálohováno: %(backing_file)s"
msgid "fping utility is not found."
msgstr "nástroj fping nenalezen."
-msgid "host"
-msgstr "Hostitel"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s neobsahuje verzi"
@@ -3902,6 +3883,3 @@ msgstr "nemůžete předat av_zone, pokud je zaměření veřejné"
msgid "you can not pass project if the scope is private"
msgstr "nemůžete předat projekt, pokud je zaměření soukromé"
-
-msgid "zone"
-msgstr "Zóna"
diff --git a/nova/locale/de/LC_MESSAGES/nova.po b/nova/locale/de/LC_MESSAGES/nova.po
index fecab2af0e..d325b440dd 100644
--- a/nova/locale/de/LC_MESSAGES/nova.po
+++ b/nova/locale/de/LC_MESSAGES/nova.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -25,7 +25,7 @@ msgstr ""
"Language-Team: German\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"Generated-By: Babel 2.2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
#, python-format
msgid "%(address)s is not a valid IP address."
@@ -116,18 +116,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s Hypervisor unterstützt PCI Gerät nicht"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s muss <= %(max_value)d sein"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s muss >= %(min_value)d sein"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s muss eine Ganzzahl sein"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"Wert %(worker_name)s von %(workers)s ist ungültig; muss größer als 0 sein"
@@ -1515,10 +1503,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "Hypervisor mit ID '%s' konnte nicht gefunden werden. "
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hypervisor: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "IP-Zuordnung über Quote in Pool %s."
@@ -4155,9 +4139,6 @@ msgstr "fmt=%(fmt)s gesichert durch: %(backing_file)s"
msgid "fping utility is not found."
msgstr "fping-Dienstprogramm wurde nicht gefunden."
-msgid "host"
-msgstr "Host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "Hyperlink %s enthält Version nicht"
@@ -4428,6 +4409,3 @@ msgstr "Sie können av_zone nicht übergeben, wenn der Bereich öffentlich ist"
msgid "you can not pass project if the scope is private"
msgstr "Sie können das Projekt nicht übergeben, wenn der Bereich privat ist"
-
-msgid "zone"
-msgstr "Zone"
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index 0867dfb4f5..97974ef5a3 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -25,7 +25,7 @@ msgstr ""
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Spanish\n"
#, python-format
@@ -116,18 +116,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "El hipervisor %(type)s no soporta dispositivos PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s debe ser <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s debe ser >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s debe ser un entero"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"El valor %(worker_name)s de %(workers)s es inválido, debe ser mayor que 0."
@@ -1472,10 +1460,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "El hipervisor con el ID '%s' no se ha podido encontrar. "
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hipervisor: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "La asignación IP excede la capacidad en pool %s."
@@ -4080,9 +4064,6 @@ msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s"
msgid "fping utility is not found."
msgstr "No se encuentra el programa de utilidad fping."
-msgid "host"
-msgstr "host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s no contiene la versión"
@@ -4354,6 +4335,3 @@ msgstr "no puede aprobar av_zone si el alcance es público"
msgid "you can not pass project if the scope is private"
msgstr "No puede aprobar el proyecto si el alcance es privado"
-
-msgid "zone"
-msgstr "zona"
diff --git a/nova/locale/fr/LC_MESSAGES/nova.po b/nova/locale/fr/LC_MESSAGES/nova.po
index 58521977da..8b955d43e8 100644
--- a/nova/locale/fr/LC_MESSAGES/nova.po
+++ b/nova/locale/fr/LC_MESSAGES/nova.po
@@ -28,7 +28,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -37,7 +37,7 @@ msgstr ""
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: French\n"
#, python-format
@@ -126,18 +126,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "L'hyperviseur %(type)s ne supporte pas les périphériques PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s doit etre <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)sdoit être supérieur à %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s doit etre un entier."
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"La valeur %(worker_name)s de %(workers)s est invalide, elle doit être "
@@ -1492,10 +1480,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "L'hyperviseur avec l'ID '%s' est introuvable."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hyperviseur: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "L'allocation IP dépasse le quota dans le pool %s."
@@ -4092,9 +4076,6 @@ msgstr "fmt=%(fmt)s sauvegardé par : %(backing_file)s"
msgid "fping utility is not found."
msgstr "L'utilitaire fping est introuvable."
-msgid "host"
-msgstr "host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s ne contient pas de version"
@@ -4368,6 +4349,3 @@ msgstr "Vous ne pouvez passer av_zone si le périmètre est public"
msgid "you can not pass project if the scope is private"
msgstr "Vous ne pouvez passer un projet si le périmètre est privé"
-
-msgid "zone"
-msgstr "zone"
diff --git a/nova/locale/it/LC_MESSAGES/nova.po b/nova/locale/it/LC_MESSAGES/nova.po
index dd86026c5d..efc150d4bf 100644
--- a/nova/locale/it/LC_MESSAGES/nova.po
+++ b/nova/locale/it/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -22,7 +22,7 @@ msgstr ""
"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Italian\n"
#, python-format
@@ -114,18 +114,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "l'hypervisor %(type)s non supporta i dispositivi PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s deve essere <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s deve essere >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s deve essere un numero intero"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"Il valore %(worker_name)s di %(workers)s non è valido, deve essere maggiore "
@@ -1470,10 +1458,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "Impossibile trovare hypervisor con ID '%s'."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hypervisor: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "L'allocazione IP supera la quota nel pool %s."
@@ -4054,9 +4038,6 @@ msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s"
msgid "fping utility is not found."
msgstr "l'utilità fping non è stata trovata."
-msgid "host"
-msgstr "host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s non contiene la versione"
@@ -4323,6 +4304,3 @@ msgstr "non è possibile passare av_zone se l'ambito è pubblico"
msgid "you can not pass project if the scope is private"
msgstr "non è possibile passare il progetto se l'ambito è privato"
-
-msgid "zone"
-msgstr "Zona"
diff --git a/nova/locale/ja/LC_MESSAGES/nova.po b/nova/locale/ja/LC_MESSAGES/nova.po
index a08c134cb5..a3db657262 100644
--- a/nova/locale/ja/LC_MESSAGES/nova.po
+++ b/nova/locale/ja/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -22,7 +22,7 @@ msgstr ""
"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Japanese\n"
#, python-format
@@ -113,18 +113,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s ハイパーバイザーは PCI デバイスをサポートしていません"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s は <= %(max_value)d 以下でなければなりません"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s は %(min_value)d 以上でなければなりません"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s は整数でなければなりません"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"%(workers)s の %(worker_name)s 値が無効です。0 より大きい値にしなければなりま"
@@ -1454,10 +1442,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "ID '%s' のハイパーバイザーが見つかりませんでした。"
#, python-format
-msgid "Hypervisor: %s"
-msgstr "ハイパーバイザー: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "IP の割り当て量がプール %s 内のクォータを超えています。"
@@ -4032,9 +4016,6 @@ msgstr "fmt=%(fmt)s は %(backing_file)s でサポートされています"
msgid "fping utility is not found."
msgstr "fping ユーティリティーが見つかりません。"
-msgid "host"
-msgstr "ホスト"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s にバージョンが含まれていません"
@@ -4298,6 +4279,3 @@ msgstr "スコープがパブリックの場合、av_zone を渡すことはで
msgid "you can not pass project if the scope is private"
msgstr "スコープがプライベートである場合、プロジェクトを渡すことはできません"
-
-msgid "zone"
-msgstr "ゾーン"
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova.po b/nova/locale/ko_KR/LC_MESSAGES/nova.po
index 9ba0e648dc..5290595cd5 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova.po
@@ -14,16 +14,16 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-03-02 12:00+0000\n"
"Last-Translator: Ian Y. Choi <ianyrchoi@gmail.com>\n"
-"Language: ko-KR\n"
+"Language: ko_KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Korean (South Korea)\n"
#, python-format
@@ -111,18 +111,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s 하이퍼바이저가 PCI 디바이스를 지원하지 않음"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s은(는) %(max_value)d보다 작거나 같아야 함"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s은(는) >= %(min_value)d이어야 함. "
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s은(는) 정수여야 함"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"%(workers)s의 %(worker_name)s 값이 올바르지 않습니다. 해당 값은 0보다 커야 합"
@@ -1444,10 +1432,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "ID가 '%s'인 하이퍼바이저를 찾을 수 없습니다. "
#, python-format
-msgid "Hypervisor: %s"
-msgstr "하이퍼바이저: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "%s 풀에서 IP 할당이 할당량을 초과했습니다."
@@ -4007,9 +3991,6 @@ msgstr "fmt=%(fmt)s 백업: %(backing_file)s"
msgid "fping utility is not found."
msgstr "fping 유틸리티를 찾을 수 없습니다. "
-msgid "host"
-msgstr "호스트"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s에 버전이 없음"
@@ -4263,6 +4244,3 @@ msgstr "공용 범위인 경우 av_zone을 전달할 수 없음"
msgid "you can not pass project if the scope is private"
msgstr "개인용 범위인 경우 프로젝트를 전달할 수 없음"
-
-msgid "zone"
-msgstr "영역"
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova.po b/nova/locale/pt_BR/LC_MESSAGES/nova.po
index 0778cdae62..e3b1f79003 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova.po
@@ -19,16 +19,16 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:08+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: pt-BR\n"
+"Language: pt_BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Portuguese (Brazil)\n"
#, python-format
@@ -117,18 +117,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "O hypervisor %(type)s não suporta dispositivos PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s deve ser <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s deve ser >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s deve ser um número inteiro"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "%(worker_name)s valor de %(workers)s é inválido, deve ser maior que 0"
@@ -1456,10 +1444,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "O hypervisor com o ID '%s' não pôde ser localizado."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hypervisor: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "Alocação de IP de cota no conjunto %s."
@@ -4022,9 +4006,6 @@ msgstr "fmt=%(fmt)s retornado por: %(backing_file)s"
msgid "fping utility is not found."
msgstr "utilitário fping não localizado."
-msgid "host"
-msgstr "host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s não contém versão"
@@ -4296,6 +4277,3 @@ msgstr "não será possível aprovar av_zone se o escopo for público"
msgid "you can not pass project if the scope is private"
msgstr "não será possível aprovar o projeto se o escopo for privado"
-
-msgid "zone"
-msgstr "zona"
diff --git a/nova/locale/ru/LC_MESSAGES/nova.po b/nova/locale/ru/LC_MESSAGES/nova.po
index 4b7bcdd58f..ecfe7529e3 100644
--- a/nova/locale/ru/LC_MESSAGES/nova.po
+++ b/nova/locale/ru/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -27,7 +27,7 @@ msgstr ""
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
"%100>=11 && n%100<=14)? 2 : 3);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Russian\n"
#, python-format
@@ -117,18 +117,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "Гипервизор %(type)s не поддерживает устройства PCI"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s должно быть <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s должен быть >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s должен быть целым числом"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
"Значение %(worker_name)s, равное %(workers)s, является недопустимым. "
@@ -1462,10 +1450,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "Гипервизор с ИД '%s' не найден."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Гипервизор: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "Превышение квоты выделения IP-адресов в пуле %s."
@@ -3994,9 +3978,6 @@ msgstr "fmt=%(fmt)s backed by: %(backing_file)s"
msgid "fping utility is not found."
msgstr "Утилита fping не найдена."
-msgid "host"
-msgstr "Узел"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s не содержит версию"
@@ -4264,6 +4245,3 @@ msgstr "Нельзя запустить av_zone, если область явл
msgid "you can not pass project if the scope is private"
msgstr "Нельзя запустить проект, если область является частной"
-
-msgid "zone"
-msgstr "Зона"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
index d5ff8e288b..e455829bfe 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -10,16 +10,16 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:09+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: tr-TR\n"
+"Language: tr_TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Turkish (Turkey)\n"
#, python-format
@@ -70,18 +70,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s hipervizörü PCI aygıtlarını desteklemiyor"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s <= %(max_value)d olmalı"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s >= %(min_value)d olmalı"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s tam sayı olmalı"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "%(workers)s in %(worker_name)s değeri geçersiz, 0'dan büyük olmalı"
@@ -1121,10 +1109,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "'%s' kimlikli hipervizör bulunamadı."
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hipervizör: %s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "%s havuzundaki IP ayırma kota üzerinde."
@@ -3239,9 +3223,6 @@ msgstr "%(backing_file)s tarafından desteklenen fmt=%(fmt)s"
msgid "fping utility is not found."
msgstr "fping aracı bulunamadı."
-msgid "host"
-msgstr "Host"
-
#, python-format
msgid "href %s does not contain version"
msgstr "%s referansı versiyon içermiyor"
@@ -3459,6 +3440,3 @@ msgstr "kapsam genelse av_zone geçiremezsiniz"
msgid "you can not pass project if the scope is private"
msgstr "kapsam özel ise proje geçiremezsiniz"
-
-msgid "zone"
-msgstr "Bölge"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova.po b/nova/locale/zh_CN/LC_MESSAGES/nova.po
index d08109bcc4..968d4f17d7 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova.po
@@ -35,17 +35,17 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-06-24 04:35+0000\n"
"Last-Translator: blkart <blkart.org@gmail.com>\n"
-"Language: zh-CN\n"
+"Language: zh_CN\n"
"Language-Team: Chinese (China)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"Generated-By: Babel 2.2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
msgid "\"Look for the VDIs failed"
msgstr "查找VDI失败"
@@ -134,18 +134,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s监测器不支持PCI设备"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s 必须小于或等于 %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s 必须大于或等于 %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s 必须为整数"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "工作线程%(worker_name)s的数量%(workers)s非法,必须大于0"
@@ -1442,10 +1430,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "找不到具有标识“%s”的管理程序。"
#, python-format
-msgid "Hypervisor: %s"
-msgstr "监测器:%s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "IP分配操作池%s的配额。"
@@ -3976,9 +3960,6 @@ msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持"
msgid "fping utility is not found."
msgstr "找不到 fping 实用程序。"
-msgid "host"
-msgstr "主机"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s 不包含版本"
@@ -4232,6 +4213,3 @@ msgstr "如范围是公共的,您不能跨过av_zone"
msgid "you can not pass project if the scope is private"
msgstr "如果范围是私有的,您不能跨过这个项目"
-
-msgid "zone"
-msgstr "域"
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova.po b/nova/locale/zh_TW/LC_MESSAGES/nova.po
index d7602d0ce1..f83415cdb6 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova.po
@@ -12,16 +12,16 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2018-01-03 23:41+0000\n"
+"POT-Creation-Date: 2018-02-28 15:53+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 06:10+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: zh-TW\n"
+"Language: zh_TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Chinese (Taiwan)\n"
#, python-format
@@ -108,18 +108,6 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s Hypervisor 不支援 PCI 裝置"
#, python-format
-msgid "%(value_name)s must be <= %(max_value)d"
-msgstr "%(value_name)s 必須 <= %(max_value)d"
-
-#, python-format
-msgid "%(value_name)s must be >= %(min_value)d"
-msgstr "%(value_name)s 必須 >= %(min_value)d"
-
-#, python-format
-msgid "%(value_name)s must be an integer"
-msgstr "%(value_name)s 必須是整數"
-
-#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "%(workers)s 的 %(worker_name)s 值無效,必須大於 0"
@@ -1326,10 +1314,6 @@ msgid "Hypervisor with ID '%s' could not be found."
msgstr "找不到 ID 為 '%s' 的 Hypervisor。"
#, python-format
-msgid "Hypervisor: %s"
-msgstr "Hypervisor:%s"
-
-#, python-format
msgid "IP allocation over quota in pool %s."
msgstr "IP 配置超過儲存區 %s 中的配額。"
@@ -3735,9 +3719,6 @@ msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援"
msgid "fping utility is not found."
msgstr "找不到 fping 公用程式。"
-msgid "host"
-msgstr "主機"
-
#, python-format
msgid "href %s does not contain version"
msgstr "href %s 不包含版本"
@@ -3984,6 +3965,3 @@ msgstr "如果範圍是公用的,則您無法傳遞 av_zone"
msgid "you can not pass project if the scope is private"
msgstr "如果範圍是專用的,則您無法傳遞專案"
-
-msgid "zone"
-msgstr "區域"
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index 677c7b4547..920d55c745 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -855,9 +855,10 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
self.obj_reset_changes()
def _load_generic(self, attrname):
- instance = self.__class__.get_by_uuid(self._context,
- uuid=self.uuid,
- expected_attrs=[attrname])
+ with utils.temporary_mutation(self._context, read_deleted='yes'):
+ instance = self.__class__.get_by_uuid(self._context,
+ uuid=self.uuid,
+ expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
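
For reference, nova.utils.temporary_mutation is a context manager that sets an attribute on an object for the duration of a with-block and restores the previous value afterwards; a minimal usage sketch under that assumption (the variable names are mine):

    from nova import objects, utils

    with utils.temporary_mutation(ctxt, read_deleted='yes'):
        # Inside the block the context also sees soft-deleted rows, so a
        # lazy-load against an already-deleted instance can still succeed;
        # the original read_deleted value is restored on exit.
        inst = objects.Instance.get_by_uuid(
            ctxt, uuid=instance_uuid, expected_attrs=['system_metadata'])
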
diff --git a/nova/privsep/libvirt.py b/nova/privsep/libvirt.py
index 8baade3ff6..da8dccb2ef 100644
--- a/nova/privsep/libvirt.py
+++ b/nova/privsep/libvirt.py
@@ -223,22 +223,35 @@ def unplug_plumgrid_vif(dev):
@nova.privsep.sys_admin_pctxt.entrypoint
-def plug_contrail_vif(instance, vif, ip_addr, ip6_addr, ptype):
- cmd_args = ('--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s '
- '--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s'
- ' --vm_name=%s --mac=%s --tap_name=%s --port_type=%s '
- '--tx_vlan_id=%d --rx_vlan_id=%d'
- % (vif['id'], instance.uuid, vif['network']['id'],
- instance.project_id, ip_addr, ip6_addr,
- instance.display_name, vif['address'],
- vif['devname'], ptype, -1, -1))
- processutils.execute('vrouter-port-control', cmd_args)
+def plug_contrail_vif(project_id, vm_id, vm_name, vif_id, net_id, port_type,
+ dev_name, mac, ip_addr, ip6_addr):
+ cmd = (
+ 'vrouter-port-control',
+ '--oper=add',
+ '--vm_project_uuid=%s' % project_id,
+ '--instance_uuid=%s' % vm_id,
+ '--vm_name=%s' % vm_name,
+ '--uuid=%s' % vif_id,
+ '--vn_uuid=%s' % net_id,
+ '--port_type=%s' % port_type,
+ '--tap_name=%s' % dev_name,
+ '--mac=%s' % mac,
+ '--ip_address=%s' % ip_addr,
+ '--ipv6_address=%s' % ip6_addr,
+ '--tx_vlan_id=-1',
+ '--rx_vlan_id=-1',
+ )
+ processutils.execute(*cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
-def unplug_contrail_vif(vif):
- cmd_args = ('--oper=delete --uuid=%s' % (vif['id']))
- processutils.execute('vrouter-port-control', cmd_args)
+def unplug_contrail_vif(port_id):
+ cmd = (
+ 'vrouter-port-control',
+ '--oper=delete',
+ '--uuid=%s' % port_id,
+ )
+ processutils.execute(*cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
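
The matching caller changes in nova/virt/libvirt/vif.py are not shown in this excerpt; as a hedged sketch of the new calling convention, a caller holding the old (instance, vif, ip_addr, ip6_addr, ptype) arguments would decompose them roughly as:

    import nova.privsep.libvirt

    # Positional order follows the new signature above: project_id, vm_id,
    # vm_name, vif_id, net_id, port_type, dev_name, mac, ip_addr, ip6_addr.
    nova.privsep.libvirt.plug_contrail_vif(
        instance.project_id, instance.uuid, instance.display_name,
        vif['id'], vif['network']['id'], ptype,
        vif['devname'], vif['address'], ip_addr, ip6_addr)

    # Teardown now only needs the port UUID.
    nova.privsep.libvirt.unplug_contrail_vif(vif['id'])
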
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 726cb5bb4e..ab53fe17bb 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -19,6 +19,7 @@ import re
import time
from keystoneauth1 import exceptions as ks_exc
+import os_traits
from oslo_log import log as logging
from oslo_middleware import request_id
from oslo_utils import versionutils
@@ -458,9 +459,10 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderRetrievalFailed(uuid=uuid)
@safe_connect
- def _get_providers_in_aggregates(self, agg_uuids):
+ def _get_sharing_providers(self, agg_uuids):
"""Queries the placement API for a list of the resource providers
- associated with any of the specified aggregates.
+ associated with any of the specified aggregates and possessing the
+ MISC_SHARES_VIA_AGGREGATE trait.
:param agg_uuids: Iterable of string UUIDs of aggregates to filter on.
:return: A list of dicts of resource provider information, which may be
@@ -471,10 +473,16 @@ class SchedulerReportClient(object):
return []
qpval = ','.join(agg_uuids)
+ # TODO(efried): Need a ?having_traits=[...] on this API!
resp = self.get("/resource_providers?member_of=in:" + qpval,
version='1.3')
if resp.status_code == 200:
- return resp.json()['resource_providers']
+ rps = []
+ for rp in resp.json()['resource_providers']:
+ traits = self._get_provider_traits(rp['uuid'])
+ if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
+ rps.append(rp)
+ return rps
# Some unexpected error
placement_req_id = get_placement_request_id(resp)
@@ -769,7 +777,7 @@ class SchedulerReportClient(object):
if refresh_sharing:
# Refresh providers associated by aggregate
- for rp in self._get_providers_in_aggregates(aggs):
+ for rp in self._get_sharing_providers(aggs):
if not self._provider_tree.exists(rp['uuid']):
# NOTE(efried): Right now sharing providers are always
# treated as roots. This is deliberate. From the
@@ -1082,9 +1090,10 @@ class SchedulerReportClient(object):
self._delete_inventory(context, rp_uuid)
@safe_connect
- def _ensure_traits(self, traits):
+ def _ensure_traits(self, context, traits):
"""Make sure all specified traits exist in the placement service.
+ :param context: The security context
:param traits: Iterable of trait strings to ensure exist.
:raises: TraitCreationFailed if traits contains a trait that did not
exist in placement, and couldn't be created. When this
@@ -1110,7 +1119,8 @@ class SchedulerReportClient(object):
# Might be neat to have a batch create. But creating multiple
# traits will generally happen once, at initial startup, if at all.
for trait in traits_to_create:
- resp = self.put('/traits/' + trait, None, version='1.6')
+ resp = self.put('/traits/' + trait, None, version='1.6',
+ global_request_id=context.global_id)
if not resp:
raise exception.TraitCreationFailed(name=trait,
error=resp.text)
@@ -1128,11 +1138,12 @@ class SchedulerReportClient(object):
raise exception.TraitRetrievalFailed(error=resp.text)
@safe_connect
- def set_traits_for_provider(self, rp_uuid, traits):
+ def set_traits_for_provider(self, context, rp_uuid, traits):
"""Replace a provider's traits with those specified.
The provider must exist - this method does not attempt to create it.
+ :param context: The security context
:param rp_uuid: The UUID of the provider whose traits are to be updated
:param traits: Iterable of traits to set on the provider
:raises: ResourceProviderUpdateConflict if the provider's generation
@@ -1150,7 +1161,7 @@ class SchedulerReportClient(object):
if not self._provider_tree.have_traits_changed(rp_uuid, traits):
return
- self._ensure_traits(traits)
+ self._ensure_traits(context, traits)
url = '/resource_providers/%s/traits' % rp_uuid
# NOTE(efried): Don't use the DELETE API when traits is empty, because
@@ -1162,7 +1173,8 @@ class SchedulerReportClient(object):
'resource_provider_generation': generation,
'traits': traits,
}
- resp = self.put(url, payload, version='1.6')
+ resp = self.put(url, payload, version='1.6',
+ global_request_id=context.global_id)
if resp.status_code == 200:
json = resp.json()
@@ -1193,11 +1205,12 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text)
@safe_connect
- def set_aggregates_for_provider(self, rp_uuid, aggregates):
+ def set_aggregates_for_provider(self, context, rp_uuid, aggregates):
"""Replace a provider's aggregates with those specified.
The provider must exist - this method does not attempt to create it.
+ :param context: The security context
:param rp_uuid: The UUID of the provider whose aggregates are to be
updated.
:param aggregates: Iterable of aggregates to set on the provider.
@@ -1206,7 +1219,8 @@ class SchedulerReportClient(object):
# TODO(efried): Handle generation conflicts when supported by placement
url = '/resource_providers/%s/aggregates' % rp_uuid
aggregates = list(aggregates) if aggregates else []
- resp = self.put(url, aggregates, version='1.1')
+ resp = self.put(url, aggregates, version='1.1',
+ global_request_id=context.global_id)
if resp.status_code == 200:
placement_aggs = resp.json()['aggregates']
@@ -1368,7 +1382,7 @@ class SchedulerReportClient(object):
return allocations.get(
rp_uuid, {}).get('resources', {})
- def _allocate_for_instance(self, rp_uuid, instance):
+ def _allocate_for_instance(self, context, rp_uuid, instance):
my_allocations = _instance_to_allocations_dict(instance)
current_allocations = self.get_allocations_for_consumer_by_provider(
rp_uuid, instance.uuid)
@@ -1382,8 +1396,9 @@ class SchedulerReportClient(object):
LOG.debug('Sending allocation for instance %s',
my_allocations,
instance=instance)
- res = self.put_allocations(rp_uuid, instance.uuid, my_allocations,
- instance.project_id, instance.user_id)
+ res = self.put_allocations(context, rp_uuid, instance.uuid,
+ my_allocations, instance.project_id,
+ instance.user_id)
if res:
LOG.info('Submitted allocation for instance', instance=instance)
@@ -1483,8 +1498,8 @@ class SchedulerReportClient(object):
return r.status_code == 204
@safe_connect
- def remove_provider_from_instance_allocation(self, consumer_uuid, rp_uuid,
- user_id, project_id,
+ def remove_provider_from_instance_allocation(self, context, consumer_uuid,
+ rp_uuid, user_id, project_id,
resources):
"""Grabs an allocation for a particular consumer UUID, strips parts of
the allocation that refer to a supplied resource provider UUID, and
@@ -1500,6 +1515,7 @@ class SchedulerReportClient(object):
subtract resources from the single allocation to ensure we do not
exceed the reserved or max_unit amounts for the resource on the host.
+ :param context: The security context
:param consumer_uuid: The instance/consumer UUID
:param rp_uuid: The UUID of the provider whose resources we wish to
remove from the consumer's allocation
@@ -1572,7 +1588,8 @@ class SchedulerReportClient(object):
LOG.debug("Sending updated allocation %s for instance %s after "
"removing resources for %s.",
new_allocs, consumer_uuid, rp_uuid)
- r = self.put(url, payload, version='1.10')
+ r = self.put(url, payload, version='1.10',
+ global_request_id=context.global_id)
if r.status_code != 204:
LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s",
consumer_uuid, r.status_code, r.text)
@@ -1648,8 +1665,8 @@ class SchedulerReportClient(object):
@safe_connect
@retries
- def put_allocations(self, rp_uuid, consumer_uuid, alloc_data, project_id,
- user_id):
+ def put_allocations(self, context, rp_uuid, consumer_uuid, alloc_data,
+ project_id, user_id):
"""Creates allocation records for the supplied instance UUID against
the supplied resource provider.
@@ -1657,6 +1674,7 @@ class SchedulerReportClient(object):
Once shared storage and things like NUMA allocations are a
reality, this will change to allocate against multiple providers.
+ :param context: The security context
:param rp_uuid: The UUID of the resource provider to allocate against.
:param consumer_uuid: The instance's UUID.
:param alloc_data: Dict, keyed by resource class, of amounts to
@@ -1680,7 +1698,8 @@ class SchedulerReportClient(object):
'user_id': user_id,
}
url = '/allocations/%s' % consumer_uuid
- r = self.put(url, payload, version='1.8')
+ r = self.put(url, payload, version='1.8',
+ global_request_id=context.global_id)
if r.status_code == 406:
# microversion 1.8 not available so try the earlier way
# TODO(melwitt): Remove this when we can be sure all placement
@@ -1726,7 +1745,7 @@ class SchedulerReportClient(object):
def update_instance_allocation(self, context, compute_node, instance,
sign):
if sign > 0:
- self._allocate_for_instance(compute_node.uuid, instance)
+ self._allocate_for_instance(context, compute_node.uuid, instance)
else:
self.delete_allocation_for_instance(context, instance.uuid)
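
Until placement grows a server-side having_traits filter (the TODO above), the sharing-provider lookup is a client-side filter; a minimal standalone sketch of that filter, with traits_for standing in for the per-provider traits call the report client makes:

    import os_traits

    def sharing_providers(providers, traits_for):
        # Keep only resource providers that advertise the sharing trait.
        return [rp for rp in providers
                if os_traits.MISC_SHARES_VIA_AGGREGATE in traits_for(rp['uuid'])]
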
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 01591c5d64..190d8dd87b 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -797,10 +797,11 @@ def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req,
user_id, allocation_request_version=allocation_request_version)
-def remove_allocation_from_compute(instance, compute_node_uuid, reportclient,
- flavor=None):
+def remove_allocation_from_compute(context, instance, compute_node_uuid,
+ reportclient, flavor=None):
"""Removes the instance allocation from the compute host.
+ :param context: The request context
:param instance: the instance object owning the allocation
:param compute_node_uuid: the UUID of the compute node where the allocation
needs to be removed
@@ -817,5 +818,5 @@ def remove_allocation_from_compute(instance, compute_node_uuid, reportclient,
my_resources = resources_from_flavor(instance, flavor)
return reportclient.remove_provider_from_instance_allocation(
- instance.uuid, compute_node_uuid, instance.user_id,
+ context, instance.uuid, compute_node_uuid, instance.user_id,
instance.project_id, my_resources)
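
The reason these signatures now take a context is so the placement client can forward the global request id with each call; the updated unit tests later in this patch expect an X-Openstack-Request-Id header built from context.global_id. A minimal sketch of that header construction, using a hypothetical helper name rather than the client's real plumbing:

    import types

    def placement_headers(context, extra=None):
        """Build placement request headers from a request context."""
        headers = dict(extra or {})
        global_id = getattr(context, 'global_id', None)
        if global_id:
            headers['X-Openstack-Request-Id'] = global_id
        return headers


    ctx = types.SimpleNamespace(global_id='req-123')
    assert placement_headers(ctx) == {'X-Openstack-Request-Id': 'req-123'}
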
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index 4dc2c892c4..0871e077e4 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -1318,6 +1318,7 @@ class CinderFixture(fixtures.Fixture):
self.swap_error = False
self.swap_volume_instance_uuid = None
self.swap_volume_instance_error_uuid = None
+ self.reserved_volumes = list()
# This is a map of instance UUIDs mapped to a list of volume IDs.
# This map gets updated on attach/detach operations.
self.attachments = collections.defaultdict(list)
@@ -1378,8 +1379,9 @@ class CinderFixture(fixtures.Fixture):
break
else:
# This is a test that does not care about the actual details.
+ reserved_volume = (volume_id in self.reserved_volumes)
volume = {
- 'status': 'available',
+ 'status': 'attaching' if reserved_volume else 'available',
'display_name': 'TEST2',
'attach_status': 'detached',
'id': volume_id,
@@ -1387,12 +1389,6 @@ class CinderFixture(fixtures.Fixture):
'size': 1
}
- # update the status based on existing attachments
- has_attachment = any(
- [volume['id'] in attachments
- for attachments in self.attachments.values()])
- volume['status'] = 'attached' if has_attachment else 'detached'
-
# Check for our special image-backed volume.
if volume_id == self.IMAGE_BACKED_VOL:
# Make it a bootable volume.
@@ -1415,7 +1411,16 @@ class CinderFixture(fixtures.Fixture):
new_volume_id, error):
return {'save_volume_id': new_volume_id}
+ def fake_reserve_volume(self_api, context, volume_id):
+ self.reserved_volumes.append(volume_id)
+
def fake_unreserve_volume(self_api, context, volume_id):
+ # NOTE(mnaser): It's possible that we unreserve a volume that was
+ # never reserved (ex: instance.volume_attach.error
+ # notification tests)
+ if volume_id in self.reserved_volumes:
+ self.reserved_volumes.remove(volume_id)
+
# Signaling that swap_volume has encountered the error
# from initialize_connection and is working on rolling back
# the reservation on SWAP_ERR_NEW_VOL.
@@ -1437,6 +1442,12 @@ class CinderFixture(fixtures.Fixture):
def fake_detach(_self, context, volume_id, instance_uuid=None,
attachment_id=None):
+            # NOTE(mnaser): It's possible that we detach a volume that was
+ # never reserved (ex: instance.volume_attach.error
+ # notification tests)
+ if volume_id in self.reserved_volumes:
+ self.reserved_volumes.remove(volume_id)
+
if instance_uuid is not None:
# If the volume isn't attached to this instance it will
# result in a ValueError which indicates a broken test or
@@ -1460,7 +1471,7 @@ class CinderFixture(fixtures.Fixture):
'nova.volume.cinder.API.migrate_volume_completion',
fake_migrate_volume_completion)
self.test.stub_out('nova.volume.cinder.API.reserve_volume',
- lambda *args, **kwargs: None)
+ fake_reserve_volume)
self.test.stub_out('nova.volume.cinder.API.roll_detaching',
lambda *args, **kwargs: None)
self.test.stub_out('nova.volume.cinder.API.terminate_connection',
@@ -1567,12 +1578,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
'size': 1
}
- # update the status based on existing attachments
- has_attachment = any(
- [volume['id'] in attachments
- for attachments in self.attachments.values()])
- volume['status'] = 'attached' if has_attachment else 'detached'
-
# Check for our special image-backed volume.
if volume_id == self.IMAGE_BACKED_VOL:
# Make it a bootable volume.
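
The fixture changes above amount to simple bookkeeping: reserve_volume records the volume id, unreserve/detach forget it, and a reserved volume is reported with status 'attaching' instead of 'available'. A standalone toy version of that bookkeeping, illustrative only and not the fixture's real interface:

    class FakeVolumeTracker(object):
        """Toy stand-in for the reserved-volume bookkeeping."""

        def __init__(self):
            self.reserved_volumes = []

        def reserve_volume(self, volume_id):
            self.reserved_volumes.append(volume_id)

        def unreserve_volume(self, volume_id):
            # Tolerate unreserving a volume that was never reserved.
            if volume_id in self.reserved_volumes:
                self.reserved_volumes.remove(volume_id)

        def get(self, volume_id):
            status = ('attaching' if volume_id in self.reserved_volumes
                      else 'available')
            return {'id': volume_id, 'status': status}


    tracker = FakeVolumeTracker()
    tracker.reserve_volume('vol-1')
    assert tracker.get('vol-1')['status'] == 'attaching'
    tracker.unreserve_volume('vol-1')
    assert tracker.get('vol-1')['status'] == 'available'
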
diff --git a/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml b/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml
index 0d5f911707..6312a243dc 100644
--- a/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml
+++ b/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml
@@ -28,6 +28,12 @@ tests:
response_strings:
- 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"'
+- name: create a trait earlier version
+ PUT: /traits/CUSTOM_TRAIT_1
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: create a trait
PUT: /traits/CUSTOM_TRAIT_1
status: 201
@@ -47,6 +53,12 @@ tests:
response_forbidden_headers:
- content-type
+- name: get a trait earlier version
+ GET: /traits/CUSTOM_TRAIT_1
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: get a trait
GET: /traits/CUSTOM_TRAIT_1
status: 204
@@ -60,6 +72,12 @@ tests:
GET: /traits/NON_EXISTED
status: 404
+- name: delete a trait earlier version
+ DELETE: /traits/CUSTOM_TRAIT_1
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: delete a trait
DELETE: /traits/CUSTOM_TRAIT_1
status: 204
@@ -107,6 +125,12 @@ tests:
- MISC_SHARES_VIA_AGGREGATE
- HW_CPU_X86_SHA
+- name: list traits earlier version
+ GET: /traits
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: list traits with invalid format of name parameter
GET: /traits?name=in_abc
status: 400
@@ -213,6 +237,12 @@ tests:
response_forbidden_headers:
- content-type
+- name: list traits for resource provider earlier version
+ GET: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: list traits for resource provider without traits
GET: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 200
@@ -224,6 +254,13 @@ tests:
- cache-control
- last-modified
+- name: set traits for resource provider earlier version
+ PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ content-type: application/json
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: set traits for resource provider
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
request_headers:
@@ -270,7 +307,19 @@ tests:
- CUSTOM_TRAIT_1
- CUSTOM_TRAIT_2
response_strings:
- - CUSTOM_TRAIT_1
+ - "'resource_provider_generation' is a required property"
+
+- name: set traits for resource provider with invalid resource provider generation
+ PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ content-type: application/json
+ status: 400
+ data:
+ traits:
+ - CUSTOM_TRAIT_1
+ resource_provider_generation: invalid_generation
+ response_strings:
+ - "'invalid_generation' is not of type 'integer'"
- name: set traits for resource provider with conflict generation
PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
@@ -300,6 +349,31 @@ tests:
- NON_EXISTED_TRAIT1
- NON_EXISTED_TRAIT2
+- name: set traits for resource provider with invalid type of traits
+ PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ content-type: application/json
+ status: 400
+ data:
+ traits: invalid_type
+ resource_provider_generation: 1
+ response_strings:
+ - "'invalid_type' is not of type 'array'"
+
+- name: set traits for resource provider with additional properties
+ PUT: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ content-type: application/json
+ status: 400
+ data:
+ traits:
+ - CUSTOM_TRAIT_1
+ - CUSTOM_TRAIT_2
+ resource_provider_generation: 1
+ additional: additional
+ response_strings:
+ - 'Additional properties are not allowed'
+
- name: set traits for non_existed resource provider
PUT: /resource_providers/non_existed/traits
request_headers:
@@ -336,6 +410,12 @@ tests:
response_strings:
- No resource provider with uuid non_existed found
+- name: delete traits for resource provider earlier version
+ DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits
+ request_headers:
+ openstack-api-version: placement 1.5
+ status: 404
+
- name: delete traits for resource provider
DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits
status: 204
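
The new negative tests above assert error strings that come from JSON schema validation of the PUT /resource_providers/{uuid}/traits body. The snippet below reproduces one of those messages with the jsonschema library against a schema of the same general shape; it is an approximation for illustration, not the schema placement actually ships:

    import jsonschema

    PUT_TRAITS_SCHEMA = {
        'type': 'object',
        'properties': {
            'traits': {'type': 'array', 'items': {'type': 'string'}},
            'resource_provider_generation': {'type': 'integer'},
        },
        'required': ['traits', 'resource_provider_generation'],
        'additionalProperties': False,
    }

    try:
        # Missing resource_provider_generation, as in the 400 test above.
        jsonschema.validate({'traits': ['CUSTOM_TRAIT_1']}, PUT_TRAITS_SCHEMA)
    except jsonschema.ValidationError as exc:
        # Prints: 'resource_provider_generation' is a required property
        print(exc.message)
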
diff --git a/nova/tests/functional/api/openstack/placement/test_report_client.py b/nova/tests/functional/api/openstack/placement/test_report_client.py
index 93b925c79a..f328f26e18 100644
--- a/nova/tests/functional/api/openstack/placement/test_report_client.py
+++ b/nova/tests/functional/api/openstack/placement/test_report_client.py
@@ -128,7 +128,7 @@ class SchedulerReportClientTests(test.TestCase):
# We should also have empty sets of aggregate and trait
# associations
self.assertEqual(
- [], self.client._get_providers_in_aggregates([uuids.agg]))
+ [], self.client._get_sharing_providers([uuids.agg]))
self.assertFalse(
self.client._provider_tree.have_aggregates_changed(
self.compute_uuid, []))
@@ -331,7 +331,8 @@ class SchedulerReportClientTests(test.TestCase):
self.client.update_compute_node(self.context, self.compute_node)
# The compute node is associated with two of the shared storages
self.client.set_aggregates_for_provider(
- self.compute_uuid, set([uuids.agg_disk_1, uuids.agg_disk_2]))
+ self.context, self.compute_uuid,
+ set([uuids.agg_disk_1, uuids.agg_disk_2]))
# Register two SR-IOV PFs with VF and bandwidth inventory
for x in (1, 2):
@@ -357,10 +358,11 @@ class SchedulerReportClientTests(test.TestCase):
},
}, parent_provider_uuid=self.compute_uuid)
# They're associated with an IP address aggregate
- self.client.set_aggregates_for_provider(uuid, [uuids.agg_ip])
+ self.client.set_aggregates_for_provider(self.context, uuid,
+ [uuids.agg_ip])
# Set some traits on 'em
self.client.set_traits_for_provider(
- uuid, ['CUSTOM_PHYSNET_%d' % x])
+ self.context, uuid, ['CUSTOM_PHYSNET_%d' % x])
# Register three shared storage pools with disk inventory
for x in (1, 2, 3):
@@ -379,11 +381,12 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider
self.client.set_traits_for_provider(
- uuid, ['MISC_SHARES_VIA_AGGREGATE'])
+ self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE'])
# Associate each with its own aggregate. The compute node is
# associated with the first two (agg_disk_1 and agg_disk_2).
agg = getattr(uuids, 'agg_disk_%d' % x)
- self.client.set_aggregates_for_provider(uuid, [agg])
+ self.client.set_aggregates_for_provider(self.context, uuid,
+ [agg])
# Register a shared IP address provider with IP address inventory
self.client.set_inventory_for_provider(
@@ -399,9 +402,11 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider, and add another trait
self.client.set_traits_for_provider(
- uuids.sip, set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))
+ self.context, uuids.sip,
+ set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))
# It's associated with the same aggregate as both PFs
- self.client.set_aggregates_for_provider(uuids.sip, [uuids.agg_ip])
+ self.client.set_aggregates_for_provider(self.context, uuids.sip,
+ [uuids.agg_ip])
# Register a shared network bandwidth provider
self.client.set_inventory_for_provider(
@@ -417,9 +422,10 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider
self.client.set_traits_for_provider(
- uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE'])
+ self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE'])
# It's associated with some other aggregate.
- self.client.set_aggregates_for_provider(uuids.sbw, [uuids.agg_bw])
+ self.client.set_aggregates_for_provider(self.context, uuids.sbw,
+ [uuids.agg_bw])
# Setup is done. Grab the ProviderTree
prov_tree = self.client.get_provider_tree_and_ensure_root(
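
The rename from _get_providers_in_aggregates to _get_sharing_providers mirrors what this setup exercises: the providers of interest are the ones carrying the MISC_SHARES_VIA_AGGREGATE trait. A toy filter over locally-held provider records (the real client queries the placement API instead, and the helper name is hypothetical):

    def sharing_providers(providers):
        """Return providers marked as sharing via aggregate."""
        return [p for p in providers
                if 'MISC_SHARES_VIA_AGGREGATE' in p.get('traits', ())]


    providers = [
        {'uuid': 'shared-storage', 'traits': ['MISC_SHARES_VIA_AGGREGATE']},
        {'uuid': 'compute-node', 'traits': ['HW_CPU_X86_SHA']},
    ]
    assert [p['uuid'] for p in sharing_providers(providers)
            ] == ['shared-storage']
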
diff --git a/nova/tests/functional/regressions/test_bug_1404867.py b/nova/tests/functional/regressions/test_bug_1404867.py
new file mode 100644
index 0000000000..5fc6466e0c
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1404867.py
@@ -0,0 +1,107 @@
+# Copyright 2018 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.compute import api as compute_api
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
+ integrated_helpers.InstanceHelperMixin):
+ """Test deleting of an instance in error state that has a reserved volume.
+
+    This test boots a server from a volume which will fail to be scheduled,
+    ending up in ERROR state with no host assigned, and then deletes it.
+
+ Since the server failed to be scheduled, a local delete should run which
+ will make sure that reserved volumes at the API layer are properly cleaned
+ up.
+
+    The regression is that Nova would not clean up the reserved volume and
+    it would be stuck in the 'attaching' state.
+ """
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def _setup_compute_service(self):
+        # Override `_setup_compute_service` so that we do not start the
+        # compute service, which makes sure that the instance will fail to
+        # find a valid host.
+ pass
+
+ def _create_error_server(self, volume_id):
+ server = self.api.post_server({
+ 'server': {
+ 'flavorRef': '1',
+ 'name': 'bfv-delete-server-in-error-status',
+ 'networks': 'none',
+ 'block_device_mapping_v2': [
+ {
+ 'boot_index': 0,
+ 'uuid': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ },
+ ]
+ }
+ })
+ return self._wait_for_state_change(self.api, server, 'ERROR')
+
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=compute_api.BFV_RESERVE_MIN_COMPUTE_VERSION)
+ def test_delete_with_reserved_volumes(self, mock_version_get=None):
+ self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+
+ # Create a server which should go to ERROR state because we don't
+ # have any active computes.
+ volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ server = self._create_error_server(volume_id)
+
+ # The status of the volume at this point should be 'attaching' as it
+        # was reserved by Nova at the API layer.
+ self.assertIn(volume_id, self.cinder.reserved_volumes)
+
+ # Delete this server, which should delete BDMs and remove the
+        # reservation on the volume.
+ self.api.delete_server(server['id'])
+
+ # The volume should no longer be reserved as the deletion of the
+ # server should have released all the resources.
+ self.assertNotIn(volume_id, self.cinder.reserved_volumes)
+
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION)
+ def test_delete_with_reserved_volumes_new(self, mock_version_get=None):
+ self.cinder = self.useFixture(
+ nova_fixtures.CinderFixtureNewAttachFlow(self))
+
+ # Create a server which should go to ERROR state because we don't
+ # have any active computes.
+ volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ server = self._create_error_server(volume_id)
+ server_id = server['id']
+
+ # There should now exist an attachment to the volume as it was created
+ # by Nova.
+ self.assertIn(volume_id, self.cinder.attachments[server_id])
+
+ # Delete this server, which should delete BDMs and remove the
+        # volume attachments created for the instance.
+ self.api.delete_server(server['id'])
+
+ # The volume should no longer have any attachments as instance delete
+ # should have removed them.
+ self.assertNotIn(volume_id, self.cinder.attachments[server_id])
diff --git a/nova/tests/functional/regressions/test_bug_1670627.py b/nova/tests/functional/regressions/test_bug_1670627.py
index 4c01c01ac3..de970628c7 100644
--- a/nova/tests/functional/regressions/test_bug_1670627.py
+++ b/nova/tests/functional/regressions/test_bug_1670627.py
@@ -59,6 +59,7 @@ class TestDeleteFromCell0CheckQuota(test.TestCase):
self.start_service('conductor')
self.start_service('scheduler')
+ self.start_service('consoleauth')
# We don't actually start a compute service; this way we don't have any
# compute hosts to schedule the instance to and will go into error and
diff --git a/nova/tests/functional/regressions/test_bug_1689692.py b/nova/tests/functional/regressions/test_bug_1689692.py
index 1b6cf48d83..5c9d137ae2 100644
--- a/nova/tests/functional/regressions/test_bug_1689692.py
+++ b/nova/tests/functional/regressions/test_bug_1689692.py
@@ -53,6 +53,7 @@ class ServerListLimitMarkerCell0Test(test.TestCase,
self.start_service('conductor')
self.start_service('scheduler')
+ self.start_service('consoleauth')
# We don't start the compute service because we want NoValidHost so
# all of the instances go into ERROR state and get put into cell0.
self.useFixture(cast_as_call.CastAsCall(self))
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index f1d2de2337..cee57baae8 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -2728,9 +2728,10 @@ class ServerMovingTests(ProviderUsageBaseTestCase):
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertIn(source_rp_uuid, allocations)
- def test_resize_to_same_host_prep_resize_fails(self):
+ def _test_resize_to_same_host_instance_fails(self, failing_method,
+ event_name):
"""Tests that when we resize to the same host and resize fails in
- the prep_resize method, we cleanup the allocations before rescheduling.
+ the given method, we cleanup the allocations before rescheduling.
"""
# make sure that the test only uses a single host
compute2_service_id = self.admin_api.get_services(
@@ -2742,16 +2743,17 @@ class ServerMovingTests(ProviderUsageBaseTestCase):
server = self._boot_and_check_allocations(self.flavor1, hostname)
- def fake_prep_resize(*args, **kwargs):
+ def fake_resize_method(*args, **kwargs):
# Ensure the allocations are doubled now before we fail.
usages = self._get_provider_usages(rp_uuid)
self.assertFlavorsMatchAllocation(
self.flavor1, self.flavor2, usages)
- raise test.TestingException('Simulated _prep_resize failure.')
+ raise test.TestingException('Simulated resize failure.')
# Yes this isn't great in a functional test, but it's simple.
- self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
- fake_prep_resize)
+ self.stub_out(
+ 'nova.compute.manager.ComputeManager.%s' % failing_method,
+ fake_resize_method)
self.flags(allow_resize_to_same_host=True)
resize_req = {
@@ -2762,7 +2764,7 @@ class ServerMovingTests(ProviderUsageBaseTestCase):
self.api.post_server_action(server['id'], resize_req)
self._wait_for_action_fail_completion(
- server, instance_actions.RESIZE, 'compute_prep_resize')
+ server, instance_actions.RESIZE, event_name)
# Ensure the allocation records still exist on the host.
source_rp_uuid = self._get_provider_uuid_by_host(hostname)
@@ -2771,6 +2773,18 @@ class ServerMovingTests(ProviderUsageBaseTestCase):
# allocation which just leaves us with the original flavor.
self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
+ def test_resize_to_same_host_prep_resize_fails(self):
+ self._test_resize_to_same_host_instance_fails(
+ '_prep_resize', 'compute_prep_resize')
+
+ def test_resize_instance_fails_allocation_cleanup(self):
+ self._test_resize_to_same_host_instance_fails(
+ '_resize_instance', 'compute_resize_instance')
+
+ def test_finish_resize_fails_allocation_cleanup(self):
+ self._test_resize_to_same_host_instance_fails(
+ '_finish_resize', 'compute_finish_resize')
+
def _test_resize_reschedule_uses_host_lists(self, fails, num_alts=None):
"""Test that when a resize attempt fails, the retry comes from the
supplied host_list, and does not call the scheduler.
diff --git a/nova/tests/functional/wsgi/test_servers.py b/nova/tests/functional/wsgi/test_servers.py
index 8d643602ff..3c2aea783b 100644
--- a/nova/tests/functional/wsgi/test_servers.py
+++ b/nova/tests/functional/wsgi/test_servers.py
@@ -10,13 +10,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
+from nova.compute import api as compute_api
from nova import test
from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
-class ServersPreSchedulingTestCase(test.TestCase):
+class ServersPreSchedulingTestCase(test.TestCase,
+ integrated_helpers.InstanceHelperMixin):
"""Tests for the servers API with unscheduled instances.
With cellsv2 an instance is not written to an instance table in the cell
@@ -237,3 +242,75 @@ class ServersPreSchedulingTestCase(test.TestCase):
'servers/detail?not-tags-any=tag1,tag3')
list_resp = list_resp.body['servers']
self.assertEqual(0, len(list_resp))
+
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=compute_api.BFV_RESERVE_MIN_COMPUTE_VERSION)
+ def test_bfv_delete_build_request_pre_scheduling_ocata(self, mock_get):
+ cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+
+ volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ server = self.api.post_server({
+ 'server': {
+ 'flavorRef': '1',
+ 'name': 'test_bfv_delete_build_request_pre_scheduling',
+ 'networks': 'none',
+ 'block_device_mapping_v2': [
+ {
+ 'boot_index': 0,
+ 'uuid': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ },
+ ]
+ }
+ })
+
+ # Since _IntegratedTestBase uses the CastAsCall fixture, when we
+ # get the server back we know all of the volume stuff should be done.
+ self.assertIn(volume_id, cinder.reserved_volumes)
+
+ # Now delete the server, which should go through the "local delete"
+ # code in the API, find the build request and delete it along with
+ # detaching the volume from the instance.
+ self.api.delete_server(server['id'])
+
+ # The volume should no longer have any attachments as instance delete
+ # should have removed them.
+ self.assertNotIn(volume_id, cinder.reserved_volumes)
+
+ def test_bfv_delete_build_request_pre_scheduling(self):
+ cinder = self.useFixture(
+ nova_fixtures.CinderFixtureNewAttachFlow(self))
+ # This makes the get_minimum_version_all_cells check say we're running
+ # the latest of everything.
+ self.useFixture(nova_fixtures.AllServicesCurrent())
+
+ volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
+ server = self.api.post_server({
+ 'server': {
+ 'flavorRef': '1',
+ 'name': 'test_bfv_delete_build_request_pre_scheduling',
+ 'networks': 'none',
+ 'block_device_mapping_v2': [
+ {
+ 'boot_index': 0,
+ 'uuid': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ },
+ ]
+ }
+ })
+
+ # Since _IntegratedTestBase uses the CastAsCall fixture, when we
+ # get the server back we know all of the volume stuff should be done.
+ self.assertIn(volume_id, cinder.attachments[server['id']])
+
+ # Now delete the server, which should go through the "local delete"
+ # code in the API, find the build request and delete it along with
+ # detaching the volume from the instance.
+ self.api.delete_server(server['id'])
+
+ # The volume should no longer have any attachments as instance delete
+ # should have removed them.
+ self.assertNotIn(volume_id, cinder.attachments[server['id']])
diff --git a/nova/tests/unit/api/openstack/compute/test_serversV21.py b/nova/tests/unit/api/openstack/compute/test_serversV21.py
index 3595f3802d..4d65d5f070 100644
--- a/nova/tests/unit/api/openstack/compute/test_serversV21.py
+++ b/nova/tests/unit/api/openstack/compute/test_serversV21.py
@@ -62,6 +62,7 @@ from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake
@@ -4238,26 +4239,36 @@ class ServersControllerCreateTestV257(test.NoDBTestCase):
self.assertIn('personality', six.text_type(ex))
+@mock.patch('nova.compute.utils.check_num_instances_quota',
+ new=lambda *args, **kwargs: 1)
class ServersControllerCreateTestV260(test.NoDBTestCase):
- """Negative tests for creating a server with a multiattach volume using
- microversion 2.60.
- """
+ """Negative tests for creating a server with a multiattach volume."""
def setUp(self):
- self.useFixture(nova_fixtures.AllServicesCurrent())
super(ServersControllerCreateTestV260, self).setUp()
+ self.useFixture(nova_fixtures.NoopQuotaDriverFixture())
self.controller = servers.ServersController()
get_flavor_mock = mock.patch(
'nova.compute.flavors.get_flavor_by_flavor_id',
- return_value=objects.Flavor(flavorid='1'))
+ return_value=fake_flavor.fake_flavor_obj(
+ context.get_admin_context(), flavorid='1'))
get_flavor_mock.start()
self.addCleanup(get_flavor_mock.stop)
+ reqspec_create_mock = mock.patch(
+ 'nova.objects.RequestSpec.create')
+ reqspec_create_mock.start()
+ self.addCleanup(reqspec_create_mock.stop)
+ volume_get_mock = mock.patch(
+ 'nova.volume.cinder.API.get',
+ return_value={'id': uuids.fake_volume_id, 'multiattach': True})
+ volume_get_mock.start()
+ self.addCleanup(volume_get_mock.stop)
def _post_server(self, version=None):
body = {
'server': {
'name': 'multiattach',
'flavorRef': '1',
- 'networks': 'auto',
+ 'networks': 'none',
'block_device_mapping_v2': [{
'uuid': uuids.fake_volume_id,
'source_type': 'volume',
@@ -4277,30 +4288,20 @@ class ServersControllerCreateTestV260(test.NoDBTestCase):
"""Tests the case that the user tries to boot from volume with a
multiattach volume but before using microversion 2.60.
"""
- with mock.patch.object(
- self.controller.compute_api, 'create',
- side_effect=
- exception.MultiattachNotSupportedOldMicroversion) as create:
- ex = self.assertRaises(webob.exc.HTTPBadRequest,
- self._post_server, '2.59')
- create_kwargs = create.call_args[1]
- self.assertFalse(create_kwargs['supports_multiattach'])
+ self.useFixture(nova_fixtures.AllServicesCurrent())
+ ex = self.assertRaises(webob.exc.HTTPBadRequest,
+ self._post_server, '2.59')
self.assertIn('Multiattach volumes are only supported starting with '
'compute API version 2.60', six.text_type(ex))
- def test_create_server_with_multiattach_fails_not_available(self):
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=compute_api.MIN_COMPUTE_MULTIATTACH - 1)
+ def test_create_server_with_multiattach_fails_not_available(
+ self, mock_get_min_version_all_cells):
"""Tests the case that the user tries to boot from volume with a
multiattach volume but before the deployment is fully upgraded.
-
- Yes, you should ignore the AllServicesCurrent fixture in the setUp.
"""
- with mock.patch.object(
- self.controller.compute_api, 'create',
- side_effect=
- exception.MultiattachSupportNotYetAvailable) as create:
- ex = self.assertRaises(webob.exc.HTTPConflict, self._post_server)
- create_kwargs = create.call_args[1]
- self.assertTrue(create_kwargs['supports_multiattach'])
+ ex = self.assertRaises(webob.exc.HTTPConflict, self._post_server)
self.assertIn('Multiattach volume support is not yet available',
six.text_type(ex))
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index faa0fab8d6..e968bcc94a 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -4622,6 +4622,7 @@ class ComputeTestCase(BaseTestCase,
# ensure that task_state is reverted after a failed operation.
migration = objects.Migration(context=self.context.elevated())
migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+ migration.uuid = mock.sentinel.uuid
migration.new_instance_type_id = '1'
instance_type = objects.Flavor()
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index 6447ededcd..ae6ff253f9 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -1115,7 +1115,7 @@ class _ComputeAPIUnitTestMixIn(object):
if self.cell_type != 'api':
if inst.vm_state == vm_states.RESIZED:
self._test_delete_resized_part(inst)
- if inst.vm_state != vm_states.SHELVED_OFFLOADED:
+ if inst.host is not None:
self.context.elevated().AndReturn(self.context)
objects.Service.get_by_compute_host(self.context,
inst.host).AndReturn(objects.Service())
@@ -1123,9 +1123,7 @@ class _ComputeAPIUnitTestMixIn(object):
mox.IsA(objects.Service)).AndReturn(
inst.host != 'down-host')
- if (inst.host == 'down-host' or
- inst.vm_state == vm_states.SHELVED_OFFLOADED):
-
+ if inst.host == 'down-host' or inst.host is None:
self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
@@ -1215,6 +1213,76 @@ class _ComputeAPIUnitTestMixIn(object):
system_metadata=fake_sys_meta)
self._test_delete('force_delete', vm_state=vm_state)
+ @mock.patch('nova.compute.api.API._delete_while_booting',
+ return_value=False)
+ @mock.patch('nova.compute.api.API._lookup_instance')
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.compute.utils.notify_about_instance_usage')
+ @mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._local_delete')
+ def test_delete_error_state_with_no_host(
+ self, mock_local_delete, mock_service_get, _mock_notify,
+ _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ # Instance in error state with no host should be a local delete
+ # for non API cells
+ inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
+ host=None))
+ mock_lookup.return_value = None, inst
+ with mock.patch.object(self.compute_api.compute_rpcapi,
+ 'terminate_instance') as mock_terminate:
+ self.compute_api.delete(self.context, inst)
+ if self.cell_type == 'api':
+ mock_terminate.assert_called_once_with(
+ self.context, inst, mock_bdm_get.return_value,
+ delete_type='delete')
+ mock_local_delete.assert_not_called()
+ else:
+ mock_local_delete.assert_called_once_with(
+ self.context, inst, mock_bdm_get.return_value,
+ 'delete', self.compute_api._do_delete)
+ mock_terminate.assert_not_called()
+ mock_service_get.assert_not_called()
+
+ @mock.patch('nova.compute.api.API._delete_while_booting',
+ return_value=False)
+ @mock.patch('nova.compute.api.API._lookup_instance')
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.compute.utils.notify_about_instance_usage')
+ @mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.context.RequestContext.elevated')
+ @mock.patch('nova.servicegroup.api.API.service_is_up', return_value=True)
+ @mock.patch('nova.compute.api.API._record_action_start')
+ @mock.patch('nova.compute.api.API._local_delete')
+ def test_delete_error_state_with_host_set(
+ self, mock_local_delete, _mock_record, mock_service_up,
+ mock_elevated, mock_service_get, _mock_notify, _mock_save,
+ mock_bdm_get, mock_lookup, _mock_del_booting):
+ # Instance in error state with host set should be a non-local delete
+ # for non API cells if the service is up
+ inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
+ host='fake-host'))
+ mock_lookup.return_value = inst
+ with mock.patch.object(self.compute_api.compute_rpcapi,
+ 'terminate_instance') as mock_terminate:
+ self.compute_api.delete(self.context, inst)
+ if self.cell_type == 'api':
+ mock_terminate.assert_called_once_with(
+ self.context, inst, mock_bdm_get.return_value,
+ delete_type='delete')
+ mock_local_delete.assert_not_called()
+ mock_service_get.assert_not_called()
+ else:
+ mock_service_get.assert_called_once_with(
+ mock_elevated.return_value, 'fake-host')
+ mock_service_up.assert_called_once_with(
+ mock_service_get.return_value)
+ mock_terminate.assert_called_once_with(
+ self.context, inst, mock_bdm_get.return_value,
+ delete_type='delete')
+ mock_local_delete.assert_not_called()
+
def test_delete_fast_if_host_not_set(self):
self.useFixture(fixtures.AllServicesCurrent())
inst = self._create_instance_obj()
@@ -1420,6 +1488,52 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertIsNone(
self.compute_api._get_stashed_volume_connector(bdm, inst))
+ @mock.patch.object(objects.BlockDeviceMapping, 'destroy')
+ def test_local_cleanup_bdm_volumes_stashed_connector_host_none(
+ self, mock_destroy):
+ """Tests that we call volume_api.terminate_connection when we found
+ a stashed connector in the bdm.connection_info dict.
+
+ This tests the case where:
+
+ 1) the instance host is None
+ 2) the instance vm_state is one where we expect host to be None
+
+ We allow a mismatch of the host in this situation if the instance is
+ in a state where we expect its host to have been set to None, such
+ as ERROR or SHELVED_OFFLOADED.
+ """
+ params = dict(host=None, vm_state=vm_states.ERROR)
+ inst = self._create_instance_obj(params=params)
+ conn_info = {'connector': {'host': 'orig-host'}}
+ vol_bdm = objects.BlockDeviceMapping(self.context, id=1,
+ instance_uuid=inst.uuid,
+ volume_id=uuids.volume_id,
+ source_type='volume',
+ destination_type='volume',
+ delete_on_termination=True,
+ connection_info=jsonutils.dumps(
+ conn_info),
+ attachment_id=None)
+ bdms = objects.BlockDeviceMappingList(objects=[vol_bdm])
+
+ @mock.patch.object(self.compute_api.volume_api, 'terminate_connection')
+ @mock.patch.object(self.compute_api.volume_api, 'detach')
+ @mock.patch.object(self.compute_api.volume_api, 'delete')
+ @mock.patch.object(self.context, 'elevated', return_value=self.context)
+ def do_test(self, mock_elevated, mock_delete,
+ mock_detach, mock_terminate):
+ self.compute_api._local_cleanup_bdm_volumes(
+ bdms, inst, self.context)
+ mock_terminate.assert_called_once_with(
+ self.context, uuids.volume_id, conn_info['connector'])
+ mock_detach.assert_called_once_with(
+ self.context, uuids.volume_id, inst.uuid)
+ mock_delete.assert_called_once_with(self.context, uuids.volume_id)
+ mock_destroy.assert_called_once_with()
+
+ do_test(self)
+
def test_local_delete_without_info_cache(self):
inst = self._create_instance_obj()
@@ -3945,6 +4059,46 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.Service, 'get_minimum_version',
return_value=17)
@mock.patch.object(cinder.API, 'get')
+ @mock.patch.object(cinder.API, 'reserve_volume')
+ def test_validate_bdm_returns_attachment_id(self, mock_reserve_volume,
+ mock_get, mock_get_min_ver,
+ mock_get_min_ver_all):
+ # Tests that bdm validation *always* returns an attachment_id even if
+ # it's None.
+ instance = self._create_instance_obj()
+ instance_type = self._create_flavor()
+ volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
+ volume_info = {'status': 'available',
+ 'attach_status': 'detached',
+ 'id': volume_id,
+ 'multiattach': False}
+ mock_get.return_value = volume_info
+
+ # NOTE(mnaser): We use the AnonFakeDbBlockDeviceDict to make sure that
+        #               the attachment_id field does not get any defaults, so
+        #               that this function is properly tested.
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.AnonFakeDbBlockDeviceDict(
+ {
+ 'boot_index': 0,
+ 'volume_id': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ }))]
+ self.compute_api._validate_bdm(self.context, instance, instance_type,
+ bdms)
+ self.assertIsNone(bdms[0].attachment_id)
+
+ mock_get.assert_called_once_with(self.context, volume_id)
+ mock_reserve_volume.assert_called_once_with(
+ self.context, volume_id)
+
+ @mock.patch.object(objects.service, 'get_minimum_version_all_cells',
+ return_value=17)
+ @mock.patch.object(objects.Service, 'get_minimum_version',
+ return_value=17)
+ @mock.patch.object(cinder.API, 'get')
@mock.patch.object(cinder.API, 'reserve_volume',
side_effect=exception.InvalidInput(reason='error'))
def test_validate_bdm_with_error_volume(self, mock_reserve_volume,
@@ -5799,6 +5953,57 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
fields=['device_id'])
self.assertEqual([], instances.objects)
+ @mock.patch('nova.compute.api.API._delete_while_booting',
+ return_value=False)
+ @mock.patch('nova.compute.api.API._lookup_instance')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch('nova.context.RequestContext.elevated')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(compute_utils, 'notify_about_instance_usage')
+ @mock.patch.object(objects.BlockDeviceMapping, 'destroy')
+ @mock.patch.object(objects.Instance, 'destroy')
+ def _test_delete_volume_backed_instance(
+ self, vm_state, mock_instance_destroy, bdm_destroy,
+ notify_about_instance_usage, mock_save, mock_elevated,
+ bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting):
+ volume_id = uuidutils.generate_uuid()
+ conn_info = {'connector': {'host': 'orig-host'}}
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 42, 'volume_id': volume_id,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'connection_info': jsonutils.dumps(conn_info)}))]
+
+ bdm_get_by_instance_uuid.return_value = bdms
+ mock_elevated.return_value = self.context
+
+ params = {'host': None, 'vm_state': vm_state}
+ inst = self._create_instance_obj(params=params)
+ mock_lookup.return_value = None, inst
+ connector = conn_info['connector']
+
+ with mock.patch.object(self.compute_api.network_api,
+ 'deallocate_for_instance') as mock_deallocate, \
+ mock.patch.object(self.compute_api.volume_api,
+ 'terminate_connection') as mock_terminate_conn, \
+ mock.patch.object(self.compute_api.volume_api,
+ 'detach') as mock_detach:
+ self.compute_api.delete(self.context, inst)
+
+ mock_deallocate.assert_called_once_with(self.context, inst)
+ mock_detach.assert_called_once_with(self.context, volume_id,
+ inst.uuid)
+ mock_terminate_conn.assert_called_once_with(self.context,
+ volume_id, connector)
+ bdm_destroy.assert_called_once_with()
+
+ def test_delete_volume_backed_instance_in_error(self):
+ self._test_delete_volume_backed_instance(vm_states.ERROR)
+
+ def test_delete_volume_backed_instance_in_shelved_offloaded(self):
+ self._test_delete_volume_backed_instance(vm_states.SHELVED_OFFLOADED)
+
class Cellsv1DeprecatedTestMixIn(object):
@mock.patch.object(objects.BuildRequestList, 'get_by_filters')
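
The delete-path tests above encode a decision that now hinges on whether the instance has a host and whether that host's compute service is up, rather than on a SHELVED_OFFLOADED vm_state check. A condensed sketch of that decision follows; it is not Nova's actual compute API code and ignores the cells v1 API-cell branch:

    def choose_delete_path(host, service_is_up):
        """Return 'local' or 'remote' for an instance delete."""
        if host is None:
            # Never scheduled (e.g. ERROR with no host) or shelved offloaded:
            # clean up locally in the API.
            return 'local'
        if not service_is_up:
            # Host is set but its compute service is down.
            return 'local'
        # Otherwise cast to the compute host to terminate the instance.
        return 'remote'


    assert choose_delete_path(None, False) == 'local'
    assert choose_delete_path('down-host', False) == 'local'
    assert choose_delete_path('fake-host', True) == 'remote'
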
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 4e8bb0cfe2..058fbe5ea6 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -725,7 +725,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.init_host()
mock_remove_allocation.assert_called_once_with(
- deleted_instance.uuid, uuids.our_node_uuid,
+ self.context, deleted_instance.uuid, uuids.our_node_uuid,
deleted_instance.user_id, deleted_instance.project_id,
mock.sentinel.my_resources)
@@ -3595,8 +3595,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
get_node.assert_called_once_with(
self.context, our_host, migration.source_node)
remove_allocation.assert_called_once_with(
- instance_2.uuid, uuids.our_node_uuid, uuids.user_id,
- uuids.project_id, mock.sentinel.resources)
+ self.context, instance_2.uuid, uuids.our_node_uuid,
+ uuids.user_id, uuids.project_id, mock.sentinel.resources)
def test_destroy_evacuated_instances_node_deleted(self):
our_host = self.compute.host
@@ -3672,8 +3672,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
# but only instance_2 is deallocated as the compute node for
# instance_1 is already deleted
remove_allocation.assert_called_once_with(
- instance_2.uuid, uuids.our_node_uuid, uuids.user_id,
- uuids.project_id, mock.sentinel.resources)
+ self.context, instance_2.uuid, uuids.our_node_uuid,
+ uuids.user_id, uuids.project_id, mock.sentinel.resources)
self.assertEqual(2, get_node.call_count)
@@ -3923,10 +3923,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertFalse(
rt.delete_allocation_for_evacuated_instance.called)
+ @mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.compute.utils.add_instance_fault_from_exc')
@mock.patch.object(manager.ComputeManager,
'_error_out_instance_on_exception')
- def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe):
+ def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe,
+ mock_elevated):
+ mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(self.context)
ex = test.TestingException('foo')
with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
@@ -3935,7 +3938,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
recreate=True, scheduled_node='foo')
rt = mrt.return_value
delete_alloc = rt.delete_allocation_for_evacuated_instance
- delete_alloc.assert_called_once_with(instance, 'foo',
+ delete_alloc.assert_called_once_with(self.context, instance, 'foo',
node_type='destination')
@mock.patch('nova.context.RequestContext.elevated')
@@ -4018,7 +4021,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
mock_delete_allocation.assert_called_once_with(
- instance, 'fake-node', node_type='destination')
+ elevated_context, instance, 'fake-node', node_type='destination')
mock_notify.assert_called_once_with(
elevated_context, instance, 'fake-mini', action='rebuild',
bdms=None, exception=exc, phase='error')
@@ -5994,6 +5997,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.migration = objects.Migration(
context=self.context.elevated(),
+ uuid=mock.sentinel.uuid,
instance_uuid=self.instance.uuid,
new_instance_type_id=7,
dest_compute=None,
@@ -6393,7 +6397,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
rt.get_node_uuid.assert_called_once_with(mock.sentinel.node)
remove = mock_rc.remove_provider_from_instance_allocation
remove.assert_called_once_with(
- instance.uuid, rt.get_node_uuid.return_value,
+ self.context, instance.uuid, rt.get_node_uuid.return_value,
instance.user_id, instance.project_id,
mock_resources.return_value)
do_it()
@@ -7022,7 +7026,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
- fn.assert_called_once_with(self.instance, self.instance.node)
+ fn.assert_called_once_with(self.context, self.instance,
+ self.instance.node)
def test_post_live_migration_legacy(self):
# We have no migrate_data...
@@ -7044,7 +7049,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
- fn.assert_called_once_with(self.instance, self.instance.node)
+ fn.assert_called_once_with(self.context, self.instance,
+ self.instance.node)
def test_post_live_migration_cinder_v3_api(self):
# Because live migration has succeeded, _post_live_migration
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 9b957b3fa4..f57d4c10a1 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -1315,6 +1315,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
rc.set_traits_for_provider.assert_called_once_with(
+ mock.sentinel.ctx,
new_compute.uuid,
mock.sentinel.traits,
)
@@ -2842,13 +2843,15 @@ class TestUpdateUsageFromInstance(BaseTestCase):
mock_resource_from_flavor.return_value = mock_resource
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.uuid = uuids.inst0
+ ctxt = context.get_admin_context()
- self.rt.delete_allocation_for_evacuated_instance(instance, _NODENAME)
+ self.rt.delete_allocation_for_evacuated_instance(
+ ctxt, instance, _NODENAME)
rc = self.rt.reportclient
mock_remove_allocation = rc.remove_provider_from_instance_allocation
mock_remove_allocation.assert_called_once_with(
- instance.uuid, self.rt.compute_nodes[_NODENAME].uuid,
+ ctxt, instance.uuid, self.rt.compute_nodes[_NODENAME].uuid,
instance.user_id, instance.project_id, mock_resource)
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 5c146b9694..0f6295604e 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -454,6 +454,91 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.mock_get_allocs.assert_called_once_with(instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch('nova.compute.utils.notify_about_instance_action')
+ @mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
+ 'instance_claim')
+ @mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
+ @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn',
+ side_effect=test.TestingException('oops!'))
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_prep_block_device', return_value='fake_bdm')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_notify_about_instance_usage')
+ @mock.patch('nova.utils.get_image_from_system_metadata')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_terminate_volume_connections')
+ def test_unshelve_spawn_fails_cleanup_volume_connections(
+ self, mock_terminate_volume_connections, mock_image_meta,
+ mock_notify_instance_usage, mock_prep_block_device, mock_spawn,
+ mock_setup_network, mock_instance_claim,
+ mock_notify_instance_action, mock_get_bdms):
+ """Tests error handling when a instance fails to unshelve and makes
+ sure that volume connections are cleaned up from the host
+ and that the host/node values are unset on the instance.
+ """
+ mock_bdms = mock.Mock()
+ mock_get_bdms.return_value = mock_bdms
+ instance = self._create_fake_instance_obj()
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ image_meta = {'properties': {'base_image_ref': uuids.image_id}}
+ mock_image_meta.return_value = image_meta
+
+ tracking = {'last_state': instance.task_state}
+
+ def fake_claim(context, instance, node, limits):
+ instance.host = self.compute.host
+ instance.node = node
+ requests = objects.InstancePCIRequests(requests=[])
+ return claims.Claim(context, instance, node,
+ self.rt, _fake_resources(),
+ requests, limits=limits)
+ mock_instance_claim.side_effect = fake_claim
+
+ def check_save(expected_task_state=None):
+ if tracking['last_state'] == task_states.UNSHELVING:
+ # This is before we've failed.
+ self.assertEqual(task_states.SPAWNING, instance.task_state)
+ tracking['last_state'] = instance.task_state
+ elif tracking['last_state'] == task_states.SPAWNING:
+ # This is after we've failed.
+ self.assertIsNone(instance.host)
+ self.assertIsNone(instance.node)
+ self.assertIsNone(instance.task_state)
+ tracking['last_state'] = instance.task_state
+ else:
+ self.fail('Unexpected save!')
+
+ with mock.patch.object(instance, 'save') as mock_save:
+ mock_save.side_effect = check_save
+ self.assertRaises(test.TestingException,
+ self.compute.unshelve_instance,
+ self.context, instance, image=None,
+ filter_properties=filter_properties, node=node)
+
+ mock_notify_instance_action.assert_called_once_with(
+ self.context, instance, 'fake-mini', action='unshelve',
+ phase='start', bdms=mock_bdms)
+ mock_notify_instance_usage.assert_called_once_with(
+ self.context, instance, 'unshelve.start')
+ mock_prep_block_device.assert_called_once_with(
+ self.context, instance, mock_bdms)
+ mock_setup_network.assert_called_once_with(self.context, instance,
+ self.compute.host)
+ mock_instance_claim.assert_called_once_with(self.context, instance,
+ test_compute.NODENAME,
+ limits)
+ mock_spawn.assert_called_once_with(
+ self.context, instance, test.MatchType(objects.ImageMeta),
+ injected_files=[], admin_password=None,
+ allocations={}, network_info=[], block_device_info='fake_bdm')
+ mock_terminate_volume_connections.assert_called_once_with(
+ self.context, instance, mock_bdms)
+
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def test_shelved_poll_none_offloaded(self, mock_get_by_filters):
# Test instances are not offloaded when shelved_offload_time is -1
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 4a69918b3e..4662e60c9f 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -1859,12 +1859,15 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
select_dest,
build_and_run):
def _fake_bury(ctxt, request_spec, exc,
- build_requests=None, instances=None):
+ build_requests=None, instances=None,
+ block_device_mapping=None):
self.assertIn('not mapped to any cell', str(exc))
self.assertEqual(1, len(build_requests))
self.assertEqual(1, len(instances))
self.assertEqual(build_requests[0].instance_uuid,
instances[0].uuid)
+ self.assertEqual(self.params['block_device_mapping'],
+ block_device_mapping)
bury.side_effect = _fake_bury
select_dest.return_value = [[fake_selection1]]
@@ -2005,6 +2008,27 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertEqual(expected, inst_states)
+ @mock.patch.object(objects.CellMapping, 'get_by_uuid')
+ @mock.patch.object(conductor_manager.ComputeTaskManager,
+ '_create_block_device_mapping')
+ def test_bury_in_cell0_with_block_device_mapping(self, mock_create_bdm,
+ mock_get_cell):
+ mock_get_cell.return_value = self.cell_mappings['cell0']
+
+ inst_br = fake_build_request.fake_req_obj(self.ctxt)
+ del inst_br.instance.id
+ inst_br.create()
+ inst = inst_br.get_new_instance(self.ctxt)
+
+ self.conductor._bury_in_cell0(
+ self.ctxt, self.params['request_specs'][0], Exception('Foo'),
+ build_requests=[inst_br], instances=[inst],
+ block_device_mapping=self.params['block_device_mapping'])
+
+ mock_create_bdm.assert_called_once_with(
+ self.cell_mappings['cell0'], inst.flavor, inst.uuid,
+ self.params['block_device_mapping'])
+
def test_reset(self):
with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc:
old_rpcapi = self.conductor_manager.compute_rpcapi
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index d09838d4ee..488fdf3a96 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -220,6 +220,22 @@ class _TestInstanceObject(object):
deleted=True)
self.assertEqual(0, len(instance.tags))
+ def test_lazy_load_generic_on_deleted_instance(self):
+ # For generic fields, we try to load the deleted record from the
+ # database.
+ instance = objects.Instance(self.context, uuid=uuids.instance,
+ user_id=self.context.user_id,
+ project_id=self.context.project_id)
+ instance.create()
+ instance.destroy()
+ # Re-create our local object to make sure it doesn't have sysmeta
+ # filled in by create()
+ instance = objects.Instance(self.context, uuid=uuids.instance,
+ user_id=self.context.user_id,
+ project_id=self.context.project_id)
+ self.assertNotIn('system_metadata', instance)
+ self.assertEqual(0, len(instance.system_metadata))
+
def test_lazy_load_tags(self):
instance = objects.Instance(self.context, uuid=uuids.instance,
user_id=self.context.user_id,
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index acf1fc2f8e..8d825c9588 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -266,11 +266,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ resp = self.client.put_allocations(self.context, rp_uuid,
+ consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
- mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
+ mock_put.assert_called_once_with(
+ expected_url, mock.ANY, version='1.8',
+ global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
def test_put_allocations_fail_fallback_succeeds(self, mock_put):
@@ -285,12 +288,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ resp = self.client.put_allocations(self.context, rp_uuid,
+ consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
# Should fall back to earlier way if 1.8 fails.
- call1 = mock.call(expected_url, mock.ANY, version='1.8')
+ call1 = mock.call(expected_url, mock.ANY, version='1.8',
+ global_request_id=self.context.global_id)
call2 = mock.call(expected_url, mock.ANY)
self.assertEqual(2, mock_put.call_count)
mock_put.assert_has_calls([call1, call2])
@@ -304,11 +309,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ resp = self.client.put_allocations(self.context, rp_uuid,
+ consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertFalse(resp)
- mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
+ mock_put.assert_called_once_with(
+ expected_url, mock.ANY, version='1.8',
+ global_request_id=self.context.global_id)
log_msg = mock_warn.call_args[0][0]
self.assertIn("Unable to submit allocation for instance", log_msg)
@@ -328,13 +336,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ resp = self.client.put_allocations(self.context, rp_uuid,
+ consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
mock_put.assert_has_calls([
- mock.call(expected_url, mock.ANY, version='1.8'),
- mock.call(expected_url, mock.ANY, version='1.8')])
+ mock.call(expected_url, mock.ANY, version='1.8',
+ global_request_id=self.context.global_id)] * 2)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
def test_put_allocations_retry_gives_up(self, mock_put):
@@ -349,14 +358,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ resp = self.client.put_allocations(self.context, rp_uuid,
+ consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertFalse(resp)
mock_put.assert_has_calls([
- mock.call(expected_url, mock.ANY, version='1.8'),
- mock.call(expected_url, mock.ANY, version='1.8'),
- mock.call(expected_url, mock.ANY, version='1.8')])
+ mock.call(expected_url, mock.ANY, version='1.8',
+ global_request_id=self.context.global_id)] * 3)
def test_claim_resources_success_with_old_version(self):
get_resp_mock = mock.Mock(status_code=200)
@@ -898,7 +907,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
- consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
+ self.context, consumer_uuid, uuids.source, user_id, project_id,
+ mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination...
@@ -928,7 +938,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
self.assertEqual(expected_allocations, actual_allocations)
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
- headers={})
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertTrue(res)
@@ -971,7 +981,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
- consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
+ self.context, consumer_uuid, uuids.source, user_id, project_id,
+ mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination...
@@ -1009,7 +1020,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
self.assertEqual(expected_allocations, actual_allocations)
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
- headers={})
+ headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertTrue(res)
@@ -1043,7 +1054,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
- consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
+ self.context, consumer_uuid, uuids.source, user_id, project_id,
+ mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
@@ -1061,7 +1073,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
- consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
+ self.context, consumer_uuid, uuids.source, user_id, project_id,
+ mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
@@ -1193,11 +1206,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
+ '_get_sharing_providers')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_providers_in_tree')
def test_ensure_resource_provider_exists_in_cache(self, get_rpt_mock,
- get_pia_mock, get_trait_mock, get_agg_mock, create_rp_mock):
+ get_shr_mock, get_trait_mock, get_agg_mock, create_rp_mock):
# Override the client object's cache to contain a resource provider
# object for the compute host and check that
# _ensure_resource_provider() doesn't call _get_resource_provider() or
@@ -1219,7 +1232,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
set(),
set(['CUSTOM_BRONZE'])
]
- get_pia_mock.return_value = [
+ get_shr_mock.return_value = [
{
'uuid': uuids.shr1,
'name': 'sharing1',
@@ -1232,7 +1245,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
},
]
self.client._ensure_resource_provider(self.context, cn.uuid)
- get_pia_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
+ get_shr_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
self.assertTrue(self.client._provider_tree.exists(uuids.shr1))
self.assertTrue(self.client._provider_tree.exists(uuids.shr2))
# _get_provider_aggregates and _traits were called thrice: one for the
@@ -1270,11 +1283,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
+ '_get_sharing_providers')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_providers_in_tree')
- def test_ensure_resource_provider_get(self, get_rpt_mock, get_pia_mock,
- get_trait_mock, get_agg_mock, create_rp_mock):
+ def test_ensure_resource_provider_get(self, get_rpt_mock, get_shr_mock,
+ get_trait_mock, get_agg_mock, create_rp_mock):
# No resource provider exists in the client's cache, so validate that
# if we get the resource provider from the placement API that we don't
# try to create the resource provider.
@@ -1286,7 +1299,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
get_agg_mock.return_value = set([uuids.agg1])
get_trait_mock.return_value = set(['CUSTOM_GOLD'])
- get_pia_mock.return_value = []
+ get_shr_mock.return_value = []
self.client._ensure_resource_provider(self.context, uuids.compute_node)
@@ -1306,7 +1319,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertFalse(
self.client._provider_tree.has_traits(uuids.compute_node,
['CUSTOM_SILVER']))
- get_pia_mock.assert_called_once_with(set([uuids.agg1]))
+ get_shr_mock.assert_called_once_with(set([uuids.agg1]))
self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
self.assertFalse(create_rp_mock.called)
@@ -1673,8 +1686,10 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertEqual(uuids.request_id,
logging_mock.call_args[0][1]['placement_req_id'])
- def test_get_providers_in_aggregates(self):
- # Ensure _get_providers_in_aggregates() returns a list of resource
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ '_get_provider_traits')
+ def test_get_sharing_providers(self, mock_get_traits):
+ # Ensure _get_sharing_providers() returns a list of resource
# provider dicts if it finds resource provider records from the
# placement API
resp_mock = mock.Mock(status_code=200)
@@ -1699,22 +1714,26 @@ class TestProviderOperations(SchedulerReportClientTestCase):
resp_mock.json.return_value = {'resource_providers': rpjson}
self.ks_adap_mock.get.return_value = resp_mock
- result = self.client._get_providers_in_aggregates([uuids.agg1,
- uuids.agg2])
+ mock_get_traits.side_effect = [
+ set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']),
+ set(['CUSTOM_BAR']),
+ ]
+ result = self.client._get_sharing_providers([uuids.agg1, uuids.agg2])
expected_url = ('/resource_providers?member_of=in:' +
','.join((uuids.agg1, uuids.agg2)))
self.ks_adap_mock.get.assert_called_once_with(
expected_url, raise_exc=False, microversion='1.3')
- self.assertEqual(rpjson, result)
+ self.assertEqual(rpjson[:1], result)
- def test_get_providers_in_aggregates_emptylist(self):
- self.assertEqual([], self.client._get_providers_in_aggregates([]))
+ def test_get_sharing_providers_emptylist(self):
+ self.assertEqual(
+ [], self.client._get_sharing_providers([]))
self.ks_adap_mock.get.assert_not_called()
@mock.patch.object(report.LOG, 'error')
- def test_get_providers_in_aggregates_error(self, logging_mock):
- # Ensure _get_providers_in_aggregates() logs an error and raises if the
+ def test_get_sharing_providers_error(self, logging_mock):
+ # Ensure _get_sharing_providers() logs an error and raises if the
# placement API call doesn't respond 200
resp_mock = mock.Mock(status_code=503)
self.ks_adap_mock.get.return_value = resp_mock
@@ -1723,7 +1742,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
uuid = uuids.agg
self.assertRaises(exception.ResourceProviderRetrievalFailed,
- self.client._get_providers_in_aggregates, [uuid])
+ self.client._get_sharing_providers, [uuid])
expected_url = '/resource_providers?member_of=in:' + uuid
self.ks_adap_mock.get.assert_called_once_with(
@@ -1983,11 +2002,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertEqual(set(),
self.client._provider_tree.data(uuids.rp).aggregates)
- self.client.set_aggregates_for_provider(uuids.rp, aggs)
+ self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs)
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/aggregates' % uuids.rp, json=aggs,
- raise_exc=False, microversion='1.1', headers={})
+ raise_exc=False, microversion='1.1',
+ headers={'X-Openstack-Request-Id': self.context.global_id})
# Cache was updated
self.assertEqual(set(aggs),
self.client._provider_tree.data(uuids.rp).aggregates)
@@ -1996,7 +2016,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
self.assertRaises(
exception.ResourceProviderUpdateFailed,
- self.client.set_aggregates_for_provider, uuids.rp, [])
+ self.client.set_aggregates_for_provider,
+ self.context, uuids.rp, [])
class TestAggregates(SchedulerReportClientTestCase):
@@ -2101,18 +2122,20 @@ class TestTraits(SchedulerReportClientTestCase):
# Request all traits; custom traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
- self.client._ensure_traits(all_traits)
+ self.client._ensure_traits(self.context, all_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_has_calls(
- [mock.call('/traits/' + trait, headers={}, **self.trait_api_kwargs)
+ [mock.call('/traits/' + trait,
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
for trait in custom_traits], any_order=True)
self.ks_adap_mock.reset_mock()
# Request standard traits; no traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
- self.client._ensure_traits(standard_traits)
+ self.client._ensure_traits(self.context, standard_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(standard_traits),
**self.trait_api_kwargs)
@@ -2121,8 +2144,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.reset_mock()
# Request no traits - short circuit
- self.client._ensure_traits(None)
- self.client._ensure_traits([])
+ self.client._ensure_traits(self.context, None)
+ self.client._ensure_traits(self.context, [])
self.ks_adap_mock.get.assert_not_called()
self.ks_adap_mock.put.assert_not_called()
@@ -2130,7 +2153,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.get.return_value = mock.Mock(status_code=400)
self.assertRaises(exception.TraitRetrievalFailed,
- self.client._ensure_traits, ['FOO'])
+ self.client._ensure_traits,
+ self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO', **self.trait_api_kwargs)
@@ -2145,12 +2169,15 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = put_mock
self.assertRaises(exception.TraitCreationFailed,
- self.client._ensure_traits, ['FOO'])
+ self.client._ensure_traits,
+ self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO', **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
- '/traits/FOO', headers={}, **self.trait_api_kwargs)
+ '/traits/FOO',
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
def test_set_traits_for_provider(self):
traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
@@ -2170,7 +2197,7 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = put_mock
# Invoke
- self.client.set_traits_for_provider(uuids.rp, traits)
+ self.client.set_traits_for_provider(self.context, uuids.rp, traits)
# Verify API calls
self.ks_adap_mock.get.assert_called_once_with(
@@ -2178,7 +2205,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/traits' % uuids.rp,
json={'traits': traits, 'resource_provider_generation': 0},
- headers={}, **self.trait_api_kwargs)
+ headers={'X-Openstack-Request-Id': self.context.global_id},
+ **self.trait_api_kwargs)
# And ensure the provider tree cache was updated appropriately
self.assertFalse(
@@ -2199,7 +2227,8 @@ class TestTraits(SchedulerReportClientTestCase):
get_mock.status_code = 400
self.assertRaises(
exception.TraitRetrievalFailed,
- self.client.set_traits_for_provider, uuids.rp, traits)
+ self.client.set_traits_for_provider,
+ self.context, uuids.rp, traits)
self.ks_adap_mock.put.assert_not_called()
get_mock.status_code = 200
@@ -2209,13 +2238,15 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = mock.Mock(status_code=409)
self.assertRaises(
exception.ResourceProviderUpdateConflict,
- self.client.set_traits_for_provider, uuids.rp, traits)
+ self.client.set_traits_for_provider,
+ self.context, uuids.rp, traits)
# Other error
self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
self.assertRaises(
exception.ResourceProviderUpdateFailed,
- self.client.set_traits_for_provider, uuids.rp, traits)
+ self.client.set_traits_for_provider,
+ self.context, uuids.rp, traits)
class TestAssociations(SchedulerReportClientTestCase):
@@ -2224,8 +2255,8 @@ class TestAssociations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
- def test_refresh_associations_no_last(self, mock_pia_get, mock_trait_get,
+ '_get_sharing_providers')
+ def test_refresh_associations_no_last(self, mock_shr_get, mock_trait_get,
mock_agg_get):
"""Test that associations are refreshed when stale."""
uuid = uuids.compute_node
@@ -2236,7 +2267,7 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._refresh_associations(uuid)
mock_agg_get.assert_called_once_with(uuid)
mock_trait_get.assert_called_once_with(uuid)
- mock_pia_get.assert_called_once_with(mock_agg_get.return_value)
+ mock_shr_get.assert_called_once_with(mock_agg_get.return_value)
self.assertIn(uuid, self.client.association_refresh_time)
self.assertTrue(
self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
@@ -2252,8 +2283,8 @@ class TestAssociations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
- def test_refresh_associations_no_refresh_sharing(self, mock_pia_get,
+ '_get_sharing_providers')
+ def test_refresh_associations_no_refresh_sharing(self, mock_shr_get,
mock_trait_get,
mock_agg_get):
"""Test refresh_sharing=False."""
@@ -2265,7 +2296,7 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._refresh_associations(uuid, refresh_sharing=False)
mock_agg_get.assert_called_once_with(uuid)
mock_trait_get.assert_called_once_with(uuid)
- mock_pia_get.assert_not_called()
+ mock_shr_get.assert_not_called()
self.assertIn(uuid, self.client.association_refresh_time)
self.assertTrue(
self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
@@ -2281,10 +2312,10 @@ class TestAssociations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
+ '_get_sharing_providers')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_associations_stale')
- def test_refresh_associations_not_stale(self, mock_stale, mock_pia_get,
+ def test_refresh_associations_not_stale(self, mock_stale, mock_shr_get,
mock_trait_get, mock_agg_get):
"""Test that refresh associations is not called when the map is
not stale.
@@ -2294,7 +2325,7 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._refresh_associations(uuid)
mock_agg_get.assert_not_called()
mock_trait_get.assert_not_called()
- mock_pia_get.assert_not_called()
+ mock_shr_get.assert_not_called()
self.assertFalse(self.client.association_refresh_time)
@mock.patch.object(report.LOG, 'debug')
@@ -2303,8 +2334,8 @@ class TestAssociations(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_provider_traits')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- '_get_providers_in_aggregates')
- def test_refresh_associations_time(self, mock_pia_get, mock_trait_get,
+ '_get_sharing_providers')
+ def test_refresh_associations_time(self, mock_shr_get, mock_trait_get,
mock_agg_get, log_mock):
"""Test that refresh associations is called when the map is stale."""
uuid = uuids.compute_node
@@ -2312,14 +2343,14 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._provider_tree.new_root('compute', uuid, 1)
mock_agg_get.return_value = set([])
mock_trait_get.return_value = set([])
- mock_pia_get.return_value = []
+ mock_shr_get.return_value = []
# Called a first time because association_refresh_time is empty.
now = time.time()
self.client._refresh_associations(uuid)
mock_agg_get.assert_called_once_with(uuid)
mock_trait_get.assert_called_once_with(uuid)
- mock_pia_get.assert_called_once_with(set())
+ mock_shr_get.assert_called_once_with(set())
log_mock.assert_has_calls([
mock.call('Refreshing aggregate associations for resource '
'provider %s, aggregates: %s', uuid, 'None'),
@@ -2331,7 +2362,7 @@ class TestAssociations(SchedulerReportClientTestCase):
# Clear call count.
mock_agg_get.reset_mock()
mock_trait_get.reset_mock()
- mock_pia_get.reset_mock()
+ mock_shr_get.reset_mock()
with mock.patch('time.time') as mock_future:
# Not called a second time because not enough time has passed.
@@ -2339,14 +2370,14 @@ class TestAssociations(SchedulerReportClientTestCase):
self.client._refresh_associations(uuid)
mock_agg_get.assert_not_called()
mock_trait_get.assert_not_called()
- mock_pia_get.assert_not_called()
+ mock_shr_get.assert_not_called()
# Called because time has passed.
mock_future.return_value = now + report.ASSOCIATION_REFRESH + 1
self.client._refresh_associations(uuid)
mock_agg_get.assert_called_once_with(uuid)
mock_trait_get.assert_called_once_with(uuid)
- mock_pia_get.assert_called_once_with(set())
+ mock_shr_get.assert_called_once_with(set())
class TestComputeNodeToInventoryDict(test.NoDBTestCase):
@@ -3506,7 +3537,8 @@ class TestAllocations(SchedulerReportClientTestCase):
self.client.update_instance_allocation(self.context, cn, inst, 1)
mock_put.assert_called_once_with(
'/allocations/%s' % inst.uuid,
- expected, version='1.8')
+ expected, version='1.8',
+ global_request_id=self.context.global_id)
self.assertTrue(mock_get.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
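For reference, the header handling asserted throughout these report-client tests reduces to mapping the context's global request id onto the X-Openstack-Request-Id header. A minimal sketch, assuming only an object that exposes a global_id attribute; placement_headers is a hypothetical name, not the report client's API:

    def placement_headers(context):
        # Hypothetical helper mirroring what the updated assertions expect:
        # no context (or no global request id) means no extra headers.
        if context is None or getattr(context, 'global_id', None) is None:
            return {}
        return {'X-Openstack-Request-Id': context.global_id}
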
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 77cf199552..3cd945cbc8 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -6913,8 +6913,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_disconnect_volume.assert_called_with(
None, connection_info, instance, encryption=None)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
- def test_detach_volume_disk_not_found(self, mock_get_domain):
+ def test_detach_volume_disk_not_found(self, mock_get_domain,
+ mock_disconnect_volume):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
mock_xml_without_disk = """<domain>
@@ -6930,10 +6932,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
5678]
mock_get_domain.return_value = mock_dom
- self.assertRaises(exception.DiskNotFound, drvr.detach_volume,
- connection_info, instance, '/dev/vdc')
+
+ drvr.detach_volume(connection_info, instance, '/dev/vdc')
mock_get_domain.assert_called_once_with(instance)
+ mock_disconnect_volume.assert_called_once_with(
+ None, connection_info, instance, encryption=None)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
+ @mock.patch('nova.virt.libvirt.host.Host._get_domain')
+ def test_detach_volume_disk_not_found_encryption(self, mock_get_domain,
+ mock_disconnect_volume,
+ mock_get_encryptor):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ mock_xml_without_disk = """<domain>
+ <devices>
+ </devices>
+</domain>"""
+ mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)
+ encryption = mock.MagicMock()
+
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"device_path": "/fake",
+ "access_mode": "rw"}}
+
+ mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
+ 5678]
+ mock_get_domain.return_value = mock_dom
+
+ drvr.detach_volume(connection_info, instance, '/dev/vdc',
+ encryption)
+
+ mock_disconnect_volume.assert_called_once_with(
+ None, connection_info, instance, encryption=encryption)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 43db5fbf5a..bb6264d19a 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -1057,7 +1057,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def test_unplug_vrouter_with_details(self, mock_unplug_contrail):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_vrouter)
- mock_unplug_contrail.assert_called_once_with(self.vif_vrouter)
+ mock_unplug_contrail.assert_called_once_with(self.vif_vrouter['id'])
@mock.patch('nova.privsep.libvirt.plug_contrail_vif')
def test_plug_vrouter_with_details(self, mock_plug_contrail):
@@ -1076,7 +1076,10 @@ class LibvirtVifTestCase(test.NoDBTestCase):
mock.call('ip', 'link', 'set', 'tap-xxx-yyy-zzz', 'up',
run_as_root=True, check_exit_code=[0, 2, 254])])
mock_plug_contrail.called_once_with(
- instance, self.vif_vrouter, '0.0.0.0', None, 'NovaVMPort')
+ instance.project_id, instance.uuid, instance.display_name,
+ self.vif_vrouter['id'], self.vif_vrouter['network']['id'],
+ 'NovaVMPort', self.vif_vrouter['devname'],
+ self.vif_vrouter['address'], '0.0.0.0', None)
@mock.patch('nova.network.linux_net.create_tap_dev')
@mock.patch('nova.privsep.libvirt.plug_contrail_vif')
@@ -1095,8 +1098,11 @@ class LibvirtVifTestCase(test.NoDBTestCase):
mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz',
multiqueue=True)
- mock_plug_contrail.assert_called_once_with(
- instance, self.vif_vrouter, '0.0.0.0', None, 'NovaVMPort')
+ mock_plug_contrail.assert_called_once_with(
+ instance.project_id, instance.uuid, instance.display_name,
+ self.vif_vrouter['id'], self.vif_vrouter['network']['id'],
+ 'NovaVMPort', self.vif_vrouter['devname'],
+ self.vif_vrouter['address'], '0.0.0.0', None)
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 0f2cb8df4a..a32f8bdd3c 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -76,7 +76,7 @@ _UNPROVISION_STATES = (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL,
_NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state',
'target_provision_state', 'last_error', 'maintenance',
- 'properties', 'instance_uuid', 'traits')
+ 'properties', 'instance_uuid', 'traits', 'resource_class')
# Console state checking interval in seconds
_CONSOLE_STATE_CHECKING_INTERVAL = 1
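The ironic change above only widens the driver's node-field whitelist to include resource_class. As an illustration (not the driver's actual code), such a whitelist is typically applied by trimming node records down to the listed keys:

    # Illustrative only: the tuple mirrors _NODE_FIELDS from the hunk above,
    # and trim_node is a hypothetical helper.
    NODE_FIELDS = ('uuid', 'power_state', 'target_power_state',
                   'provision_state', 'target_provision_state', 'last_error',
                   'maintenance', 'properties', 'instance_uuid', 'traits',
                   'resource_class')

    def trim_node(node_dict):
        # Keep only the whitelisted fields; missing ones default to None.
        return {field: node_dict.get(field) for field in NODE_FIELDS}
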
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index e8b4bf0aaf..c2e0e6a569 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1563,7 +1563,11 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.warning("During detach_volume, instance disappeared.",
instance=instance)
except exception.DeviceNotFound:
- raise exception.DiskNotFound(location=disk_dev)
+ # We should still try to disconnect the logical device from
+ # the host; an error might have happened during a previous
+ # call.
+ LOG.info("Device %s not found in instance.",
+ disk_dev, instance=instance)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
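The detach_volume hunk above stops re-raising DiskNotFound so that host-side cleanup still runs when the disk is already gone from the guest. A self-contained sketch of that control flow, with detach_from_guest and disconnect_from_host as hypothetical stand-ins for the driver's private helpers:

    import logging

    LOG = logging.getLogger(__name__)

    class DeviceNotFound(Exception):
        """Stand-in for nova.exception.DeviceNotFound."""

    def detach_volume_sketch(detach_from_guest, disconnect_from_host,
                             disk_dev, connection_info, encryption=None):
        try:
            detach_from_guest(disk_dev)
        except DeviceNotFound:
            # The disk is already gone from the domain; log and fall through
            # instead of raising, as the new behaviour does.
            LOG.info("Device %s not found in instance.", disk_dev)
        # Host-side cleanup always runs, matching the _disconnect_volume
        # calls the new tests assert on.
        disconnect_from_host(connection_info, encryption=encryption)
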
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index b0a75bd11b..6d5f2385e9 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -724,7 +724,17 @@ class LibvirtGenericVIFDriver(object):
instance.flavor)
linux_net.create_tap_dev(dev, multiqueue=multiqueue)
nova.privsep.libvirt.plug_contrail_vif(
- instance, vif, ip_addr, ip6_addr, ptype)
+ instance.project_id,
+ instance.uuid,
+ instance.display_name,
+ vif['id'],
+ vif['network']['id'],
+ ptype,
+ dev,
+ vif['address'],
+ ip_addr,
+ ip6_addr,
+ )
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while plugging vif"), instance=instance)
@@ -875,8 +885,9 @@ class LibvirtGenericVIFDriver(object):
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
+ port_id = vif['id']
try:
- nova.privsep.libvirt.unplug_contrail_vif(vif)
+ nova.privsep.libvirt.unplug_contrail_vif(port_id)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)