36 files changed, 1073 insertions(+), 191 deletions(-)
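
The headline change in this merge is creating the RequestSpec, BuildRequest, and InstanceMapping records in a single API-database transaction, so a DBError from any one INSERT rolls back all three. A minimal sketch of the pattern, using oslo.db's enginefacade directly (the helper name below is illustrative; Nova's actual decorator is nova.db.sqlalchemy.api.api_context_manager.writer, visible in the diff):

    from oslo_db.sqlalchemy import enginefacade

    # One transaction context shared by the API database models.
    api_context_manager = enginefacade.transaction_context()

    @api_context_manager.writer
    def create_all(context, request_spec, build_request, instance_mapping):
        # The writer decorator opens a single transaction on 'context';
        # each create() below joins it (nested writer blocks reuse the
        # outer transaction), so a failure in any of them rolls back all
        # three and leaves no orphaned records behind.
        request_spec.create()
        build_request.create()
        instance_mapping.create()
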
diff --git a/.gitreview b/.gitreview index 1081799851..b42001bb1c 100644 --- a/.gitreview +++ b/.gitreview @@ -1,5 +1,5 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/nova.git defaultbranch=stable/queens diff --git a/.zuul.yaml b/.zuul.yaml index 28d2873b1d..9270e01447 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,7 +8,7 @@ Contains common configuration. timeout: 10800 required-projects: - - openstack-infra/devstack-gate + - openstack/devstack-gate - openstack/nova - openstack/tempest irrelevant-files: @@ -35,7 +35,7 @@ each other. timeout: 10800 required-projects: - - openstack-infra/devstack-gate + - openstack/devstack-gate - openstack/nova - openstack/tempest irrelevant-files: @@ -154,6 +154,38 @@ devstack_localrc: TEMPEST_COMPUTE_TYPE: compute_legacy +- job: + name: nova-grenade-live-migration + parent: nova-dsvm-multinode-base + description: | + Multi-node grenade job which runs nova/tests/live_migration/hooks tests. + In other words, this tests live migration with mixed-version compute + services which is important for things like rolling upgrade support. + The former name for this job was + "legacy-grenade-dsvm-neutron-multinode-live-migration". + required-projects: + - openstack/grenade + run: playbooks/legacy/nova-grenade-live-migration/run.yaml + post-run: playbooks/legacy/nova-grenade-live-migration/post.yaml + irrelevant-files: + - ^(placement-)?api-.*$ + - ^(test-|)requirements.txt$ + - ^.*\.rst$ + - ^.git.*$ + - ^api-.*$ + - ^doc/.*$ + - ^nova/hacking/.*$ + - ^nova/locale/.*$ + - ^nova/tests/.*\.py$ + - ^nova/tests/functional/.*$ + - ^nova/tests/unit/.*$ + - ^releasenotes/.*$ + - ^setup.cfg$ + - ^tests-py3.txt$ + - ^tools/.*$ + - ^tox.ini$ + voting: false + - project: # Please try to keep the list of job names sorted alphabetically. templates: @@ -188,6 +220,7 @@ - ^tools/.*$ - ^tox.ini$ - nova-cells-v1 + - nova-grenade-live-migration - nova-live-migration - nova-multiattach - nova-next @@ -223,25 +256,6 @@ - ^tests-py3.txt$ - ^tools/.*$ - ^tox.ini$ - - legacy-grenade-dsvm-neutron-multinode-live-migration: - voting: false - irrelevant-files: - - ^(placement-)?api-.*$ - - ^(test-|)requirements.txt$ - - ^.*\.rst$ - - ^.git.*$ - - ^api-.*$ - - ^doc/.*$ - - ^nova/hacking/.*$ - - ^nova/locale/.*$ - - ^nova/tests/.*\.py$ - - ^nova/tests/functional/.*$ - - ^nova/tests/unit/.*$ - - ^releasenotes/.*$ - - ^setup.cfg$ - - ^tests-py3.txt$ - - ^tools/.*$ - - ^tox.ini$ - devstack-plugin-ceph-tempest: voting: false irrelevant-files: diff --git a/nova/compute/api.py b/nova/compute/api.py index 7b6cca444c..bc3cc6df95 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -57,6 +57,7 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context as nova_context from nova import crypto from nova.db import base +from nova.db.sqlalchemy import api as db_api from nova import exception from nova import exception_wrapper from nova import hooks @@ -886,6 +887,20 @@ class API(base.Base): # by the network quotas return base_options, max_network_count, key_pair, security_groups + @staticmethod + @db_api.api_context_manager.writer + def _create_reqspec_buildreq_instmapping(context, rs, br, im): + """Create the request spec, build request, and instance mapping in a + single database transaction. + + The RequestContext must be passed in to this method so that the + database transaction context manager decorator will nest properly and + include each create() into the same transaction context. 
+ """ + rs.create() + br.create() + im.create() + def _provision_instances(self, context, instance_type, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, @@ -915,7 +930,6 @@ class API(base.Base): # spec as this is how the conductor knows how many were in this # batch. req_spec.num_instances = num_instances - req_spec.create() # Create an instance object, but do not store in db yet. instance = objects.Instance(context=context) @@ -939,7 +953,6 @@ class API(base.Base): project_id=instance.project_id, block_device_mappings=block_device_mapping, tags=instance_tags) - build_request.create() # Create an instance_mapping. The null cell_mapping indicates # that the instance doesn't yet exist in a cell, and lookups @@ -951,7 +964,14 @@ class API(base.Base): inst_mapping.instance_uuid = instance_uuid inst_mapping.project_id = context.project_id inst_mapping.cell_mapping = None - inst_mapping.create() + + # Create the request spec, build request, and instance mapping + # records in a single transaction so that if a DBError is + # raised from any of them, all INSERTs will be rolled back and + # no orphaned records will be left behind. + self._create_reqspec_buildreq_instmapping(context, req_spec, + build_request, + inst_mapping) instances_to_build.append( (req_spec, build_request, inst_mapping)) @@ -3290,6 +3310,19 @@ class API(base.Base): self._check_quota_for_upsize(context, instance, instance.flavor, instance.old_flavor) + # The AZ for the server may have changed when it was migrated so while + # we are in the API and have access to the API DB, update the + # instance.availability_zone before casting off to the compute service. + # Note that we do this in the API to avoid an "up-call" from the + # compute service to the API DB. This is not great in case something + # fails during revert before the instance.host is updated to the + # original source host, but it is good enough for now. Long-term we + # could consider passing the AZ down to compute so it can set it when + # the instance.host value is set in finish_revert_resize. + instance.availability_zone = ( + availability_zones.get_host_availability_zone( + context, migration.source_compute)) + instance.task_state = task_states.RESIZE_REVERTING instance.save(expected_task_state=[None]) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 21faf303a0..6f81150cce 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -5344,8 +5344,16 @@ class ComputeManager(manager.Manager): 'mountpoint': bdm['mount_device']}, instance=instance) if bdm['attachment_id']: - self.volume_api.attachment_delete(context, - bdm['attachment_id']) + # Try to delete the attachment to make the volume + # available again. Note that DriverVolumeBlockDevice + # may have already deleted the attachment so ignore + # VolumeAttachmentNotFound. + try: + self.volume_api.attachment_delete( + context, bdm['attachment_id']) + except exception.VolumeAttachmentNotFound as exc: + LOG.debug('Ignoring VolumeAttachmentNotFound: %s', + exc, instance=instance) else: self.volume_api.unreserve_volume(context, bdm.volume_id) compute_utils.notify_about_volume_attach_detach( @@ -5669,9 +5677,9 @@ class ComputeManager(manager.Manager): # new style attachments (v3.44). Once we drop support for old style # attachments we could think about cleaning up the cinder-initiated # swap volume API flows. 
- is_cinder_migration = ( - True if old_volume['status'] in ('retyping', - 'migrating') else False) + is_cinder_migration = False + if 'migration_status' in old_volume: + is_cinder_migration = old_volume['migration_status'] == 'migrating' old_vol_size = old_volume['size'] new_volume = self.volume_api.get(context, new_volume_id) new_vol_size = new_volume['size'] diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index ff0169cc84..9dda311cfc 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -965,8 +965,7 @@ class ComputeTaskManager(base.Base): elif recreate: # NOTE(sbauza): Augment the RequestSpec object by excluding # the source host for avoiding the scheduler to pick it - request_spec.ignore_hosts = request_spec.ignore_hosts or [] - request_spec.ignore_hosts.append(instance.host) + request_spec.ignore_hosts = [instance.host] # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host @@ -1318,13 +1317,6 @@ class ComputeTaskManager(base.Base): 'build_instances', updates, exc, request_spec) - # TODO(mnaser): The cell mapping should already be populated by - # this point to avoid setting it below here. - inst_mapping = objects.InstanceMapping.get_by_instance_uuid( - context, instance.uuid) - inst_mapping.cell_mapping = cell - inst_mapping.save() - # In order to properly clean-up volumes when deleting a server in # ERROR status with no host, we need to store BDMs in the same # cell. @@ -1340,6 +1332,18 @@ class ComputeTaskManager(base.Base): with nova_context.target_cell(context, cell) as cctxt: self._create_tags(cctxt, instance.uuid, tags) + # NOTE(mdbooth): To avoid an incomplete instance record being + # returned by the API, the instance mapping must be + # created after the instance record is complete in + # the cell, and before the build request is + # destroyed. + # TODO(mnaser): The cell mapping should already be populated by + # this point to avoid setting it below here. + inst_mapping = objects.InstanceMapping.get_by_instance_uuid( + context, instance.uuid) + inst_mapping.cell_mapping = cell + inst_mapping.save() + # Be paranoid about artifacts being deleted underneath us. try: build_request.destroy() diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py index 52383acd73..0234b6b9ad 100644 --- a/nova/conductor/tasks/live_migrate.py +++ b/nova/conductor/tasks/live_migrate.py @@ -14,6 +14,7 @@ from oslo_log import log as logging import oslo_messaging as messaging import six +from nova import availability_zones from nova.compute import power_state from nova.conductor.tasks import base from nova.conductor.tasks import migrate @@ -99,6 +100,10 @@ class LiveMigrationTask(base.TaskBase): # node name off it to set in the Migration object below. 
dest_node = dest_node.hypervisor_hostname + self.instance.availability_zone = ( + availability_zones.get_host_availability_zone( + self.context, self.destination)) + self.migration.source_node = self.instance.node self.migration.dest_node = dest_node self.migration.dest_compute = self.destination diff --git a/nova/image/glance.py b/nova/image/glance.py index 1a8d532afb..13bfd90886 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -160,21 +160,35 @@ class GlanceClientWrapper(object): self.api_server = next(self.api_servers) return _glanceclient_from_endpoint(context, self.api_server, version) - def call(self, context, version, method, *args, **kwargs): + def call(self, context, version, method, controller=None, args=None, + kwargs=None): """Call a glance client method. If we get a connection error, retry the request according to CONF.glance.num_retries. + + :param context: RequestContext to use + :param version: Numeric version of the *Glance API* to use + :param method: string method name to execute on the glanceclient + :param controller: optional string name of the client controller to + use. Default (None) is to use the 'images' + controller + :param args: optional iterable of arguments to pass to the + glanceclient method, splatted as positional args + :param kwargs: optional dict of arguments to pass to the glanceclient, + splatted into named arguments """ + args = args or [] + kwargs = kwargs or {} retry_excs = (glanceclient.exc.ServiceUnavailable, glanceclient.exc.InvalidEndpoint, glanceclient.exc.CommunicationError) num_attempts = 1 + CONF.glance.num_retries + controller_name = controller or 'images' for attempt in range(1, num_attempts + 1): client = self.client or self._create_onetime_client(context, version) try: - controller = getattr(client, - kwargs.pop('controller', 'images')) + controller = getattr(client, controller_name) result = getattr(controller, method)(*args, **kwargs) if inspect.isgenerator(result): # Convert generator results to a list, so that we can @@ -238,7 +252,7 @@ class GlanceImageServiceV2(object): image is deleted. """ try: - image = self._client.call(context, 2, 'get', image_id) + image = self._client.call(context, 2, 'get', args=(image_id,)) except Exception: _reraise_translated_image_exception(image_id) @@ -273,7 +287,7 @@ class GlanceImageServiceV2(object): """Calls out to Glance for a list of detailed image information.""" params = _extract_query_params_v2(kwargs) try: - images = self._client.call(context, 2, 'list', **params) + images = self._client.call(context, 2, 'list', kwargs=params) except Exception: _reraise_translated_exception() @@ -318,7 +332,8 @@ class GlanceImageServiceV2(object): LOG.exception("Download image error") try: - image_chunks = self._client.call(context, 2, 'data', image_id) + image_chunks = self._client.call( + context, 2, 'data', args=(image_id,)) except Exception: _reraise_translated_image_exception(image_id) @@ -430,14 +445,14 @@ class GlanceImageServiceV2(object): def _add_location(self, context, image_id, location): # 'show_multiple_locations' must be enabled in glance api conf file. 
try: - return self._client.call(context, 2, 'add_location', image_id, - location, {}) + return self._client.call( + context, 2, 'add_location', args=(image_id, location, {})) except glanceclient.exc.HTTPBadRequest: _reraise_translated_exception() def _upload_data(self, context, image_id, data): - self._client.call(context, 2, 'upload', image_id, data) - return self._client.call(context, 2, 'get', image_id) + self._client.call(context, 2, 'upload', args=(image_id, data)) + return self._client.call(context, 2, 'get', args=(image_id,)) def _get_image_create_disk_format_default(self, context): """Gets an acceptable default image disk_format based on the schema. @@ -457,8 +472,8 @@ class GlanceImageServiceV2(object): # Get the image schema - note we don't cache this value since it could # change under us. This looks a bit funky, but what it's basically # doing is calling glanceclient.v2.Client.schemas.get('image'). - image_schema = self._client.call(context, 2, 'get', 'image', - controller='schemas') + image_schema = self._client.call( + context, 2, 'get', args=('image',), controller='schemas') # get the disk_format schema property from the raw schema disk_format_schema = ( image_schema.raw()['properties'].get('disk_format') if image_schema @@ -498,7 +513,7 @@ class GlanceImageServiceV2(object): location = sent_service_image_meta.pop('location', None) image = self._client.call( - context, 2, 'create', **sent_service_image_meta) + context, 2, 'create', kwargs=sent_service_image_meta) image_id = image['id'] # Sending image location in a separate request. @@ -542,7 +557,7 @@ class GlanceImageServiceV2(object): location = sent_service_image_meta.pop('location', None) image_id = sent_service_image_meta['image_id'] image = self._client.call( - context, 2, 'update', **sent_service_image_meta) + context, 2, 'update', kwargs=sent_service_image_meta) # Sending image location in a separate request. if location: @@ -565,7 +580,7 @@ class GlanceImageServiceV2(object): """ try: - self._client.call(context, 2, 'delete', image_id) + self._client.call(context, 2, 'delete', args=(image_id,)) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) except glanceclient.exc.HTTPForbidden: diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py index be9bd1e0d2..6cb5de1c15 100644 --- a/nova/objects/request_spec.py +++ b/nova/objects/request_spec.py @@ -450,7 +450,13 @@ class RequestSpec(base.NovaObject): # though they should match. if key in ['id', 'instance_uuid']: setattr(spec, key, db_spec[key]) - else: + elif key == 'ignore_hosts': + # NOTE(mriedem): Do not override the 'ignore_hosts' + # field which is not persisted. It is not a lazy-loadable + # field. If it is not set, set None. 
+ if not spec.obj_attr_is_set(key): + setattr(spec, key, None) + elif key in spec_obj: setattr(spec, key, getattr(spec_obj, key)) spec._context = context @@ -511,9 +517,9 @@ class RequestSpec(base.NovaObject): if 'instance_group' in spec and spec.instance_group: spec.instance_group.members = None spec.instance_group.hosts = None - # NOTE(mriedem): Don't persist retries or requested_destination - # since those are per-request - for excluded in ('retry', 'requested_destination'): + # NOTE(mriedem): Don't persist retries, requested_destination + # or ignored hosts since those are per-request + for excluded in ('retry', 'requested_destination', 'ignore_hosts'): if excluded in spec and getattr(spec, excluded): setattr(spec, excluded, None) diff --git a/nova/tests/functional/db/test_compute_api.py b/nova/tests/functional/db/test_compute_api.py new file mode 100644 index 0000000000..5c5264e816 --- /dev/null +++ b/nova/tests/functional/db/test_compute_api.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.compute import api as compute_api +from nova import context as nova_context +from nova import exception +from nova import objects +from nova import test +from nova.tests import fixtures as nova_fixtures +import nova.tests.uuidsentinel as uuids + + +class ComputeAPITestCase(test.NoDBTestCase): + USES_DB_SELF = True + + def setUp(self): + super(ComputeAPITestCase, self).setUp() + self.useFixture(nova_fixtures.Database(database='api')) + + @mock.patch('nova.objects.instance_mapping.InstanceMapping.create') + def test_reqspec_buildreq_instmapping_single_transaction(self, + mock_create): + # Simulate a DBError during an INSERT by raising an exception from the + # InstanceMapping.create method. + mock_create.side_effect = test.TestingException('oops') + + ctxt = nova_context.RequestContext('fake-user', 'fake-project') + rs = objects.RequestSpec(context=ctxt, instance_uuid=uuids.inst) + # project_id and instance cannot be None + br = objects.BuildRequest(context=ctxt, instance_uuid=uuids.inst, + project_id=ctxt.project_id, + instance=objects.Instance()) + im = objects.InstanceMapping(context=ctxt, instance_uuid=uuids.inst) + + self.assertRaises( + test.TestingException, + compute_api.API._create_reqspec_buildreq_instmapping, ctxt, rs, br, + im) + + # Since the instance mapping failed to INSERT, we should not have + # written a request spec record or a build request record. 
+ self.assertRaises( + exception.RequestSpecNotFound, + objects.RequestSpec.get_by_instance_uuid, ctxt, uuids.inst) + self.assertRaises( + exception.BuildRequestNotFound, + objects.BuildRequest.get_by_instance_uuid, ctxt, uuids.inst) diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py new file mode 100644 index 0000000000..698bd1fcb8 --- /dev/null +++ b/nova/tests/functional/regressions/test_bug_1669054.py @@ -0,0 +1,84 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import context +from nova import objects +from nova.tests.functional import integrated_helpers +from nova.tests.unit import fake_network +from nova.virt import fake + + +class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase, + integrated_helpers.InstanceHelperMixin): + """Regression test for bug 1669054 introduced in Newton. + + When resizing a server, if CONF.allow_resize_to_same_host is False, + the API will set RequestSpec.ignore_hosts = [instance.host] and then + later in conductor the RequestSpec changes are saved to persist the new + flavor. This inadvertently saves the ignore_hosts value. Later if you + try to migrate, evacuate or unshelve the server, that original source + host will be ignored. If the deployment has a small number of computes, + like two in an edge node, then evacuate will fail because the only other + available host is ignored. This test recreates the scenario. + """ + # Set variables used in the parent class. + REQUIRES_LOCKING = False + ADMIN_API = True + USE_NEUTRON = True + _image_ref_parameter = 'imageRef' + _flavor_ref_parameter = 'flavorRef' + api_major_version = 'v2.1' + microversion = '2.11' # Need at least 2.11 for the force-down API + + def setUp(self): + super(ResizeEvacuateTestCase, self).setUp() + fake_network.set_stub_network_methods(self) + + def test_resize_then_evacuate(self): + # Create a server. At this point there is only one compute service. + flavors = self.api.get_flavors() + flavor1 = flavors[0]['id'] + server = self._build_server(flavor1) + server = self.api.post_server({'server': server}) + self._wait_for_state_change(self.api, server, 'ACTIVE') + + # Start up another compute service so we can resize. + fake.set_nodes(['host2']) + self.addCleanup(fake.restore_nodes) + host2 = self.start_service('compute', host='host2') + + # Now resize the server to move it to host2. + flavor2 = flavors[1]['id'] + req = {'resize': {'flavorRef': flavor2}} + self.api.post_server_action(server['id'], req) + server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host']) + self.api.post_server_action(server['id'], {'confirmResize': None}) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + + # Disable the host on which the server is now running (host2). + host2.stop() + self.api.force_down_service('host2', 'nova-compute', forced_down=True) + # Now try to evacuate the server back to the original source compute. 
+ req = {'evacuate': {'onSharedStorage': False}} + self.api.post_server_action(server['id'], req) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + # The evacuate flow in the compute manager is annoying in that it + # sets the instance status to ACTIVE before updating the host, so we + # have to wait for the migration record to be 'done' to avoid a race. + self._wait_for_migration_status(server, ['done']) + self.assertEqual(self.compute.host, server['OS-EXT-SRV-ATTR:host']) + + # Assert the RequestSpec.ignore_hosts field is not populated. + reqspec = objects.RequestSpec.get_by_instance_uuid( + context.get_admin_context(), server['id']) + self.assertIsNone(reqspec.ignore_hosts) diff --git a/nova/tests/functional/test_availability_zones.py b/nova/tests/functional/test_availability_zones.py new file mode 100644 index 0000000000..23a83c607f --- /dev/null +++ b/nova/tests/functional/test_availability_zones.py @@ -0,0 +1,167 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nova import context +from nova import objects +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit.image import fake as fake_image +from nova.tests.unit import policy_fixture +from nova.virt import fake + + +class TestAvailabilityZoneScheduling( + test.TestCase, integrated_helpers.InstanceHelperMixin): + + def setUp(self): + super(TestAvailabilityZoneScheduling, self).setUp() + + self.useFixture(policy_fixture.RealPolicyFixture()) + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(nova_fixtures.PlacementFixture()) + + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + + self.api = api_fixture.admin_api + self.api.microversion = 'latest' + + fake_image.stub_out_image_service(self) + self.addCleanup(fake_image.FakeImageService_reset) + + self.start_service('conductor') + self.start_service('scheduler') + + # Start two compute services in separate zones. + self._start_host_in_zone('host1', 'zone1') + self._start_host_in_zone('host2', 'zone2') + + flavors = self.api.get_flavors() + self.flavor1 = flavors[0]['id'] + self.flavor2 = flavors[1]['id'] + + def _start_host_in_zone(self, host, zone): + # Start the nova-compute service. + fake.set_nodes([host]) + self.addCleanup(fake.restore_nodes) + self.start_service('compute', host=host) + # Create a host aggregate with a zone in which to put this host. + aggregate_body = { + "aggregate": { + "name": zone, + "availability_zone": zone + } + } + aggregate = self.api.api_post( + '/os-aggregates', aggregate_body).body['aggregate'] + # Now add the compute host to the aggregate. + add_host_body = { + "add_host": { + "host": host + } + } + self.api.api_post( + '/os-aggregates/%s/action' % aggregate['id'], add_host_body) + + def _create_server(self, name): + # Create a server, it doesn't matter which host it ends up in. 
+ server_body = self._build_minimal_create_server_request( + self.api, name, image_uuid=fake_image.get_valid_image_id(), + flavor_id=self.flavor1, networks='none') + server = self.api.post_server({'server': server_body}) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + original_host = server['OS-EXT-SRV-ATTR:host'] + # Assert the server has the AZ set (not None or 'nova'). + expected_zone = 'zone1' if original_host == 'host1' else 'zone2' + self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone']) + return server + + def _assert_instance_az(self, server, expected_zone): + # Check the API. + self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone']) + # Check the DB. + ctxt = context.get_admin_context() + with context.target_cell( + ctxt, self.cell_mappings[test.CELL1_NAME]) as cctxt: + instance = objects.Instance.get_by_uuid(cctxt, server['id']) + self.assertEqual(expected_zone, instance.availability_zone) + + def test_live_migrate_implicit_az(self): + """Tests live migration of an instance with an implicit AZ. + + Before Pike, a server created without an explicit availability zone + was assigned a default AZ based on the "default_schedule_zone" config + option which defaults to None, which allows the instance to move + freely between availability zones. + + With change I8d426f2635232ffc4b510548a905794ca88d7f99 in Pike, if the + user does not request an availability zone, the + instance.availability_zone field is set based on the host chosen by + the scheduler. The default AZ for all nova-compute services is + determined by the "default_availability_zone" config option which + defaults to "nova". + + This test creates two nova-compute services in separate zones, creates + a server without specifying an explicit zone, and then tries to live + migrate the instance to the other compute which should succeed because + the request spec does not include an explicit AZ, so the instance is + still not restricted to its current zone even if it says it is in one. + """ + server = self._create_server('test_live_migrate_implicit_az') + original_host = server['OS-EXT-SRV-ATTR:host'] + + # Attempt to live migrate the instance; again, we don't specify a host + # because there are only two hosts so the scheduler would only be able + # to pick the second host which is in a different zone. + live_migrate_req = { + 'os-migrateLive': { + 'block_migration': 'auto', + 'host': None + } + } + self.api.post_server_action(server['id'], live_migrate_req) + + # Poll the migration until it is done. + migration = self._wait_for_migration_status(server, ['completed']) + self.assertEqual('live-migration', migration['migration_type']) + + # Assert that the server did move. Note that we check both the API and + # the database because the API will return the AZ from the host + # aggregate if instance.host is not None. + server = self.api.get_server(server['id']) + expected_zone = 'zone2' if original_host == 'host1' else 'zone1' + self._assert_instance_az(server, expected_zone) + + def test_resize_revert_across_azs(self): + """Creates two compute service hosts in separate AZs. Creates a server + without an explicit AZ so it lands in one AZ, and then resizes the + server which moves it to the other AZ. Then the resize is reverted and + asserts the server is shown as back in the original source host AZ. 
+ """ + server = self._create_server('test_resize_revert_across_azs') + original_host = server['OS-EXT-SRV-ATTR:host'] + original_az = 'zone1' if original_host == 'host1' else 'zone2' + + # Resize the server which should move it to the other zone. + self.api.post_server_action( + server['id'], {'resize': {'flavorRef': self.flavor2}}) + server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE') + + # Now the server should be in the other AZ. + new_zone = 'zone2' if original_host == 'host1' else 'zone1' + self._assert_instance_az(server, new_zone) + + # Revert the resize and the server should be back in the original AZ. + self.api.post_server_action(server['id'], {'revertResize': None}) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + self._assert_instance_az(server, original_az) diff --git a/nova/tests/live_migration/hooks/run_tests.sh b/nova/tests/live_migration/hooks/run_tests.sh index f8683ecd86..76d37e30db 100755 --- a/nova/tests/live_migration/hooks/run_tests.sh +++ b/nova/tests/live_migration/hooks/run_tests.sh @@ -41,23 +41,25 @@ echo '2. NFS testing is skipped due to setup failures with Ubuntu 16.04' #run_tempest "NFS shared storage test" "live_migration" #nfs_teardown -echo '3. test with Ceph for root + ephemeral disks' -prepare_ceph -GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf} -configure_and_start_glance - -# Deal with grenade craziness... if [ "$GRENADE_OLD_BRANCH" ]; then - # NOTE(mriedem): Grenade runs in singleconductor mode, so it won't use - # /etc/nova/nova-cpu.conf so we have to overwrite NOVA_CPU_CONF which is - # read in configure_and_start_nova. - if ! is_service_enabled n-super-cond; then - NOVA_CPU_CONF=$NOVA_CONF - fi -fi + # NOTE(mriedem): Testing with ceph in the grenade live migration job is + # disabled because of a mess of changes in devstack from queens which + # result in the pike node running with nova.conf and the queens node + # running with nova-cpu.conf and _ceph_configure_nova (in ceph.sh) does + # not configure the nodes properly for rbd auth which makes rbd-backed + # live migration fail (because the nodes on shared storage can't + # communicate). Fixing that is non-trivial so we just skip ceph testing + # in the grenade case. + echo '2. test with Ceph is skipped due to bug 1813216' +else + echo '3. test with Ceph for root + ephemeral disks' + prepare_ceph + GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf} + configure_and_start_glance -configure_and_start_nova -run_tempest "Ceph nova&glance test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))" + configure_and_start_nova + run_tempest "Ceph nova&glance test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))" +fi set +e #echo '4. 
test with Ceph for volumes and root + ephemeral disk' diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index ecb86e5cc4..96074e76f9 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -487,6 +487,58 @@ class ComputeVolumeTestCase(BaseTestCase): exception=expected_exception), ]) + @mock.patch.object(compute_manager.LOG, 'debug') + @mock.patch.object(compute_utils, 'EventReporter') + @mock.patch('nova.context.RequestContext.elevated') + @mock.patch('nova.compute.utils.notify_about_volume_attach_detach') + def test_attach_volume_ignore_VolumeAttachmentNotFound( + self, mock_notify, mock_elevate, mock_event, mock_debug_log): + """Tests the scenario that the DriverVolumeBlockDevice.attach flow + already deleted the volume attachment before the + ComputeManager.attach_volume flow tries to rollback the attachment + record and delete it, which raises VolumeAttachmentNotFound and is + ignored. + """ + mock_elevate.return_value = self.context + + attachment_id = uuids.attachment_id + fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) + fake_bdm.attachment_id = attachment_id + instance = self._create_fake_instance_obj() + expected_exception = test.TestingException() + + def fake_attach(*args, **kwargs): + raise expected_exception + + with test.nested( + mock.patch.object(driver_block_device.DriverVolumeBlockDevice, + 'attach'), + mock.patch.object(cinder.API, 'attachment_delete'), + mock.patch.object(objects.BlockDeviceMapping, + 'destroy') + ) as (mock_attach, mock_attach_delete, mock_destroy): + mock_attach.side_effect = fake_attach + mock_attach_delete.side_effect = \ + exception.VolumeAttachmentNotFound( + attachment_id=attachment_id) + self.assertRaises( + test.TestingException, self.compute.attach_volume, + self.context, instance, fake_bdm) + mock_destroy.assert_called_once_with() + mock_notify.assert_has_calls([ + mock.call(self.context, instance, 'fake-mini', + action='volume_attach', phase='start', + volume_id=uuids.volume_id), + mock.call(self.context, instance, 'fake-mini', + action='volume_attach', phase='error', + exception=expected_exception, + volume_id=uuids.volume_id), + ]) + mock_event.assert_called_once_with( + self.context, 'compute_attach_volume', instance.uuid) + self.assertIsInstance(mock_debug_log.call_args[0][1], + exception.VolumeAttachmentNotFound) + @mock.patch.object(compute_utils, 'EventReporter') def test_detach_volume_api_raises(self, mock_event): fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py index 02cde0ba27..67eaed3f6a 100644 --- a/nova/tests/unit/compute/test_compute_api.py +++ b/nova/tests/unit/compute/test_compute_api.py @@ -1839,12 +1839,14 @@ class _ComputeAPIUnitTestMixIn(object): def test_confirm_resize_with_migration_ref(self): self._test_confirm_resize(mig_ref_passed=True) + @mock.patch('nova.availability_zones.get_host_availability_zone', + return_value='nova') @mock.patch('nova.objects.Quotas.check_deltas') @mock.patch('nova.objects.Migration.get_by_instance_and_status') @mock.patch('nova.context.RequestContext.elevated') @mock.patch('nova.objects.RequestSpec.get_by_instance_uuid') def _test_revert_resize(self, mock_get_reqspec, mock_elevated, - mock_get_migration, mock_check): + mock_get_migration, mock_check, mock_get_host_az): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) 
fake_inst.old_flavor = fake_inst.flavor @@ -1887,11 +1889,14 @@ class _ComputeAPIUnitTestMixIn(object): def test_revert_resize(self): self._test_revert_resize() + @mock.patch('nova.availability_zones.get_host_availability_zone', + return_value='nova') @mock.patch('nova.objects.Quotas.check_deltas') @mock.patch('nova.objects.Migration.get_by_instance_and_status') @mock.patch('nova.context.RequestContext.elevated') def test_revert_resize_concurrent_fail(self, mock_elevated, - mock_get_migration, mock_check): + mock_get_migration, mock_check, + mock_get_host_az): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_inst.old_flavor = fake_inst.flavor @@ -4409,6 +4414,9 @@ class _ComputeAPIUnitTestMixIn(object): mock_br, mock_rs): fake_keypair = objects.KeyPair(name='test') + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping', + new=mock.MagicMock()) @mock.patch('nova.compute.utils.check_num_instances_quota') @mock.patch.object(self.compute_api, 'security_group_api') @mock.patch.object(self.compute_api, @@ -4441,6 +4449,8 @@ class _ComputeAPIUnitTestMixIn(object): do_test() def test_provision_instances_creates_build_request(self): + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping') @mock.patch.object(objects.Instance, 'create') @mock.patch.object(self.compute_api, 'volume_api') @mock.patch('nova.compute.utils.check_num_instances_quota') @@ -4456,7 +4466,8 @@ class _ComputeAPIUnitTestMixIn(object): def do_test(mock_get_min_ver, mock_get_min_ver_cells, _mock_inst_mapping_create, mock_build_req, mock_req_spec_from_components, _mock_ensure_default, - mock_check_num_inst_quota, mock_volume, mock_inst_create): + mock_check_num_inst_quota, mock_volume, mock_inst_create, + mock_create_rs_br_im): min_count = 1 max_count = 2 @@ -4519,11 +4530,14 @@ class _ComputeAPIUnitTestMixIn(object): br.instance.project_id) self.assertEqual(1, br.block_device_mappings[0].id) self.assertEqual(br.instance.uuid, br.tags[0].resource_id) - br.create.assert_called_with() + mock_create_rs_br_im.assert_any_call(ctxt, rs, br, im) do_test() def test_provision_instances_creates_instance_mapping(self): + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping', + new=mock.MagicMock()) @mock.patch('nova.compute.utils.check_num_instances_quota') @mock.patch.object(objects.Instance, 'create', new=mock.MagicMock()) @mock.patch.object(self.compute_api.security_group_api, @@ -4534,8 +4548,6 @@ class _ComputeAPIUnitTestMixIn(object): new=mock.MagicMock()) @mock.patch.object(objects.RequestSpec, 'from_components', mock.MagicMock()) - @mock.patch.object(objects.BuildRequest, 'create', - new=mock.MagicMock()) @mock.patch('nova.objects.InstanceMapping') def do_test(mock_inst_mapping, mock_check_num_inst_quota): inst_mapping_mock = mock.MagicMock() @@ -4614,6 +4626,8 @@ class _ComputeAPIUnitTestMixIn(object): _mock_cinder_reserve_volume, _mock_cinder_check_availability_zone, _mock_cinder_get, _mock_get_min_ver_cells, _mock_get_min_ver): + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping') @mock.patch('nova.compute.utils.check_num_instances_quota') @mock.patch.object(objects, 'Instance') @mock.patch.object(self.compute_api.security_group_api, @@ -4624,7 +4638,8 @@ class _ComputeAPIUnitTestMixIn(object): @mock.patch.object(objects, 'InstanceMapping') def do_test(mock_inst_mapping, mock_build_req, mock_req_spec_from_components, _mock_create_bdm, - _mock_ensure_default, mock_inst, 
mock_check_num_inst_quota): + _mock_ensure_default, mock_inst, mock_check_num_inst_quota, + mock_create_rs_br_im): min_count = 1 max_count = 2 @@ -4689,9 +4704,10 @@ class _ComputeAPIUnitTestMixIn(object): check_server_group_quota, filter_properties, None, tags) # First instance, build_req, mapping is created and destroyed - self.assertTrue(build_req_mocks[0].create.called) + mock_create_rs_br_im.assert_called_once_with(ctxt, req_spec_mock, + build_req_mocks[0], + inst_map_mocks[0]) self.assertTrue(build_req_mocks[0].destroy.called) - self.assertTrue(inst_map_mocks[0].create.called) self.assertTrue(inst_map_mocks[0].destroy.called) # Second instance, build_req, mapping is not created nor destroyed self.assertFalse(inst_mocks[1].create.called) @@ -4716,6 +4732,8 @@ class _ComputeAPIUnitTestMixIn(object): _mock_bdm, _mock_cinder_attach_create, _mock_cinder_check_availability_zone, _mock_cinder_get, _mock_get_min_ver_cells, _mock_get_min_ver): + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping') @mock.patch('nova.compute.utils.check_num_instances_quota') @mock.patch.object(objects, 'Instance') @mock.patch.object(self.compute_api.security_group_api, @@ -4726,7 +4744,8 @@ class _ComputeAPIUnitTestMixIn(object): @mock.patch.object(objects, 'InstanceMapping') def do_test(mock_inst_mapping, mock_build_req, mock_req_spec_from_components, _mock_create_bdm, - _mock_ensure_default, mock_inst, mock_check_num_inst_quota): + _mock_ensure_default, mock_inst, mock_check_num_inst_quota, + mock_create_rs_br_im): min_count = 1 max_count = 2 @@ -4791,9 +4810,10 @@ class _ComputeAPIUnitTestMixIn(object): check_server_group_quota, filter_properties, None, tags) # First instance, build_req, mapping is created and destroyed - self.assertTrue(build_req_mocks[0].create.called) + mock_create_rs_br_im.assert_called_once_with(ctxt, req_spec_mock, + build_req_mocks[0], + inst_map_mocks[0]) self.assertTrue(build_req_mocks[0].destroy.called) - self.assertTrue(inst_map_mocks[0].create.called) self.assertTrue(inst_map_mocks[0].destroy.called) # Second instance, build_req, mapping is not created nor destroyed self.assertFalse(inst_mocks[1].create.called) @@ -4804,6 +4824,9 @@ class _ComputeAPIUnitTestMixIn(object): do_test() def test_provision_instances_creates_reqspec_with_secgroups(self): + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping', + new=mock.MagicMock()) @mock.patch('nova.compute.utils.check_num_instances_quota') @mock.patch.object(self.compute_api, 'security_group_api') @mock.patch.object(compute_api, 'objects') diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index fc5f29d748..a498bb67ea 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -2192,11 +2192,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase): connection_info='{"data": {}}', volume_size=1) old_volume = { 'id': uuids.old_volume_id, 'size': 1, 'status': 'retyping', - 'multiattach': False + 'migration_status': 'migrating', 'multiattach': False } new_volume = { 'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved', - 'multiattach': False + 'migration_status': 'migrating', 'multiattach': False } attachment_update.return_value = {"connection_info": {"data": {}}} get_bdm.return_value = bdm @@ -2338,12 +2338,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase): attachment_id=uuids.old_attachment_id, connection_info='{"data": {}}') old_volume = { - 'id': uuids.old_volume_id, 
'size': 1, 'status': 'migrating', - 'multiattach': False + 'id': uuids.old_volume_id, 'size': 1, 'status': 'in-use', + 'migration_status': 'migrating', 'multiattach': False } new_volume = { 'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved', - 'multiattach': False + 'migration_status': 'migrating', 'multiattach': False } get_bdm.return_value = bdm get_volume.side_effect = (old_volume, new_volume) diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py index b65f5c20f1..fd8ec25a01 100644 --- a/nova/tests/unit/conductor/tasks/test_live_migrate.py +++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py @@ -66,7 +66,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase): servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) - def test_execute_with_destination(self, new_mode=True): + @mock.patch('nova.availability_zones.get_host_availability_zone', + return_value='fake-az') + def test_execute_with_destination(self, mock_get_az, new_mode=True): dest_node = objects.ComputeNode(hypervisor_hostname='dest_node') with test.nested( mock.patch.object(self.task, '_check_host_is_up'), @@ -107,6 +109,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase): migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) + mock_get_az.assert_called_once_with(self.context, self.destination) + self.assertEqual('fake-az', self.instance.availability_zone) # make sure the source/dest fields were set on the migration object self.assertEqual(self.instance.node, self.migration.source_node) self.assertEqual(dest_node.hypervisor_hostname, @@ -123,7 +127,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase): def test_execute_with_destination_old_school(self): self.test_execute_with_destination(new_mode=False) - def test_execute_without_destination(self): + @mock.patch('nova.availability_zones.get_host_availability_zone', + return_value='nova') + def test_execute_without_destination(self, mock_get_az): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) @@ -155,6 +161,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase): migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) + mock_get_az.assert_called_once_with(self.context, 'found_host') self.assertEqual('found_host', self.migration.dest_compute) self.assertEqual('found_node', self.migration.dest_node) self.assertEqual(self.instance.node, self.migration.source_node) diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py index b3599caad5..4ddc795d72 100644 --- a/nova/tests/unit/conductor/test_conductor.py +++ b/nova/tests/unit/conductor/test_conductor.py @@ -1525,7 +1525,7 @@ class _BaseTaskTestCase(object): instance=inst_obj, **compute_args) - def test_rebuild_instance_with_request_spec(self): + def test_evacuate_instance_with_request_spec(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' expected_host = 'thebesthost' @@ -1533,10 +1533,10 @@ class _BaseTaskTestCase(object): expected_limits = None fake_selection = objects.Selection(service_host=expected_host, nodename=expected_node, limits=None) - fake_spec = objects.RequestSpec(ignore_hosts=[]) + fake_spec = objects.RequestSpec(ignore_hosts=[uuids.ignored_host]) rebuild_args, compute_args = self._prepare_rebuild_args( {'host': None, 'node': expected_node, 'limits': expected_limits, - 'request_spec': fake_spec}) + 'request_spec': fake_spec, 'recreate': True}) with 
test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), @@ -1550,10 +1550,9 @@ class _BaseTaskTestCase(object): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) - if rebuild_args['recreate']: - reset_fd.assert_called_once_with() - else: - reset_fd.assert_not_called() + reset_fd.assert_called_once_with() + # The RequestSpec.ignore_hosts field should be overwritten. + self.assertEqual([inst_obj.host], fake_spec.ignore_hosts) select_dest_mock.assert_called_once_with(self.context, fake_spec, [inst_obj.uuid], return_objects=True, return_alternates=False) @@ -2047,6 +2046,60 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase): self.assertTrue(mock_build.called) + @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid') + def test_cleanup_build_artifacts(self, inst_map_get): + """Simple test to ensure the order of operations in the cleanup method + is enforced. + """ + req_spec = fake_request_spec.fake_spec_obj() + build_req = fake_build_request.fake_req_obj(self.context) + instance = build_req.instance + bdms = objects.BlockDeviceMappingList(objects=[ + objects.BlockDeviceMapping(instance_uuid=instance.uuid)]) + tags = objects.TagList(objects=[objects.Tag(tag='test')]) + cell1 = self.cell_mappings['cell1'] + cell_mapping_cache = {instance.uuid: cell1} + err = exc.TooManyInstances('test') + + # We need to assert that BDMs and tags are created in the cell DB + # before the instance mapping is updated. + def fake_create_block_device_mapping(*args, **kwargs): + inst_map_get.return_value.save.assert_not_called() + + def fake_create_tags(*args, **kwargs): + inst_map_get.return_value.save.assert_not_called() + + with test.nested( + mock.patch.object(self.conductor_manager, + '_set_vm_state_and_notify'), + mock.patch.object(self.conductor_manager, + '_create_block_device_mapping', + side_effect=fake_create_block_device_mapping), + mock.patch.object(self.conductor_manager, '_create_tags', + side_effect=fake_create_tags), + mock.patch.object(build_req, 'destroy'), + mock.patch.object(req_spec, 'destroy'), + ) as ( + _set_vm_state_and_notify, _create_block_device_mapping, + _create_tags, build_req_destroy, req_spec_destroy, + ): + self.conductor_manager._cleanup_build_artifacts( + self.context, err, [instance], [build_req], [req_spec], bdms, + tags, cell_mapping_cache) + # Assert the various mock calls. 
+ _set_vm_state_and_notify.assert_called_once_with( + test.MatchType(context.RequestContext), instance.uuid, + 'build_instances', + {'vm_state': vm_states.ERROR, 'task_state': None}, err, req_spec) + _create_block_device_mapping.assert_called_once_with( + cell1, instance.flavor, instance.uuid, bdms) + _create_tags.assert_called_once_with( + test.MatchType(context.RequestContext), instance.uuid, tags) + inst_map_get.return_value.save.assert_called_once_with() + self.assertEqual(cell1, inst_map_get.return_value.cell_mapping) + build_req_destroy.assert_called_once_with() + req_spec_destroy.assert_called_once_with() + @mock.patch('nova.objects.CellMapping.get_by_uuid') def test_bury_in_cell0_no_cell0(self, mock_cm_get): mock_cm_get.side_effect = exc.CellMappingNotFound(uuid='0') diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py index d1507827b4..782f1d8f8e 100644 --- a/nova/tests/unit/image/test_glance.py +++ b/nova/tests/unit/image/test_glance.py @@ -408,13 +408,13 @@ class TestGlanceClientWrapperRetries(test.NoDBTestCase): self.flags(api_servers=api_servers, group='glance') def assert_retry_attempted(self, sleep_mock, client, expected_url): - client.call(self.ctx, 1, 'get', 'meow') + client.call(self.ctx, 1, 'get', args=('meow',)) sleep_mock.assert_called_once_with(1) self.assertEqual(str(client.api_server), expected_url) def assert_retry_not_attempted(self, sleep_mock, client): self.assertRaises(exception.GlanceConnectionFailed, - client.call, self.ctx, 1, 'get', 'meow') + client.call, self.ctx, 1, 'get', args=('meow',)) self.assertFalse(sleep_mock.called) @mock.patch('time.sleep') @@ -488,12 +488,12 @@ class TestGlanceClientWrapperRetries(test.NoDBTestCase): # sleep (which would be an indication of a retry) self.assertRaises(exception.GlanceConnectionFailed, - client.call, self.ctx, 1, 'get', 'meow') + client.call, self.ctx, 1, 'get', args=('meow',)) self.assertEqual(str(client.api_server), 'http://host1:9292') self.assertFalse(sleep_mock.called) self.assertRaises(exception.GlanceConnectionFailed, - client.call, self.ctx, 1, 'get', 'meow') + client.call, self.ctx, 1, 'get', args=('meow',)) self.assertEqual(str(client.api_server), 'https://host2:9293') self.assertFalse(sleep_mock.called) @@ -512,6 +512,33 @@ class TestGlanceClientWrapperRetries(test.NoDBTestCase): create_client_mock.return_value = client_mock +class TestCommonPropertyNameConflicts(test.NoDBTestCase): + + """Tests that images that have common property names like "version" don't + cause an exception to be raised from the wacky GlanceClientWrapper magic + call() method. + + :see https://bugs.launchpad.net/nova/+bug/1717547 + """ + + @mock.patch('nova.image.glance.GlanceClientWrapper._create_onetime_client') + def test_version_property_conflicts(self, mock_glance_client): + client = mock.MagicMock() + mock_glance_client.return_value = client + ctx = mock.sentinel.ctx + service = glance.GlanceImageServiceV2() + + # Simulate the process of snapshotting a server that was launched with + # an image with the properties collection containing a (very + # commonly-named) "version" property. 
+ image_meta = { + 'id': 1, + 'version': 'blows up', + } + # This call would blow up before the fix for 1717547 + service.create(ctx, image_meta) + + class TestDownloadNoDirectUri(test.NoDBTestCase): """Tests the download method of the GlanceImageServiceV2 when the @@ -530,8 +557,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) self.assertEqual(mock.sentinel.image_chunks, res) @mock.patch.object(six.moves.builtins, 'open') @@ -546,8 +573,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) self.assertIsNone(res) data.write.assert_has_calls( [ @@ -573,8 +600,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): dst_path=mock.sentinel.dst_path) self.assertFalse(show_mock.called) - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb') fsync_mock.assert_called_once_with(writer) self.assertIsNone(res) @@ -604,8 +631,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) self.assertIsNone(res) data.write.assert_has_calls( [ @@ -718,8 +745,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): tran_mod.download.assert_called_once_with(ctx, mock.ANY, mock.sentinel.dst_path, mock.sentinel.loc_meta) - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) fsync_mock.assert_called_once_with(writer) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() @@ -767,8 +794,8 @@ class TestDownloadNoDirectUri(test.NoDBTestCase): mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') - client.call.assert_called_once_with(ctx, 2, 'data', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'data', args=(mock.sentinel.image_id,)) fsync_mock.assert_called_once_with(writer) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() @@ -1044,8 +1071,8 @@ class TestShow(test.NoDBTestCase): service = glance.GlanceImageServiceV2(client) info = service.show(ctx, mock.sentinel.image_id) - client.call.assert_called_once_with(ctx, 2, 'get', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(mock.sentinel.image_id,)) is_avail_mock.assert_called_once_with(ctx, {}) trans_from_mock.assert_called_once_with({}, include_locations=False) self.assertIn('mock', info) @@ -1063,8 +1090,8 @@ class TestShow(test.NoDBTestCase): with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, mock.sentinel.image_id) - client.call.assert_called_once_with(ctx, 2, 'get', - mock.sentinel.image_id) + 
client.call.assert_called_once_with( + ctx, 2, 'get', args=(mock.sentinel.image_id,)) is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) @@ -1082,8 +1109,8 @@ class TestShow(test.NoDBTestCase): with testtools.ExpectedException(exception.ImageNotAuthorized): service.show(ctx, mock.sentinel.image_id) - client.call.assert_called_once_with(ctx, 2, 'get', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(mock.sentinel.image_id,)) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with(mock.sentinel.image_id) @@ -1118,8 +1145,8 @@ class TestShow(test.NoDBTestCase): ctx = mock.sentinel.ctx service = glance.GlanceImageServiceV2(client) image_info = service.show(ctx, glance_image.id) - client.call.assert_called_once_with(ctx, 2, 'get', - glance_image.id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(glance_image.id,)) NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', @@ -1143,7 +1170,8 @@ class TestShow(test.NoDBTestCase): image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) - client.call.assert_called_once_with(ctx, 2, 'get', image_id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(image_id,)) avail_mock.assert_called_once_with(ctx, mock.sentinel.image) trans_from_mock.assert_called_once_with(mock.sentinel.image, include_locations=True) @@ -1165,7 +1193,8 @@ class TestShow(test.NoDBTestCase): image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) - client.call.assert_called_once_with(ctx, 2, 'get', image_id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(image_id,)) expected = locations expected.append({'url': mock.sentinel.duri, 'metadata': {}}) self.assertIn('locations', info) @@ -1189,8 +1218,8 @@ class TestShow(test.NoDBTestCase): with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, glance_image.id, show_deleted=False) - client.call.assert_called_once_with(ctx, 2, 'get', - glance_image.id) + client.call.assert_called_once_with( + ctx, 2, 'get', args=(glance_image.id,)) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) @@ -1214,7 +1243,7 @@ class TestDetail(test.NoDBTestCase): service = glance.GlanceImageServiceV2(client) images = service.detail(ctx, **params) - client.call.assert_called_once_with(ctx, 2, 'list') + client.call.assert_called_once_with(ctx, 2, 'list', kwargs={}) is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) trans_from_mock.assert_called_once_with(mock.sentinel.images_0) self.assertEqual([mock.sentinel.trans_from], images) @@ -1234,7 +1263,7 @@ class TestDetail(test.NoDBTestCase): service = glance.GlanceImageServiceV2(client) images = service.detail(ctx, **params) - client.call.assert_called_once_with(ctx, 2, 'list') + client.call.assert_called_once_with(ctx, 2, 'list', kwargs={}) is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) self.assertEqual([], images) @@ -1248,10 +1277,8 @@ class TestDetail(test.NoDBTestCase): service = glance.GlanceImageServiceV2(client) service.detail(ctx, page_size=5, limit=10) - client.call.assert_called_once_with(ctx, 2, 'list', - filters={}, - page_size=5, - limit=10) + client.call.assert_called_once_with( + ctx, 2, 'list', kwargs=dict(filters={}, page_size=5, 
limit=10)) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._extract_query_params_v2') @@ -1271,7 +1298,7 @@ class TestDetail(test.NoDBTestCase): with testtools.ExpectedException(exception.Forbidden): service.detail(ctx, **params) - client.call.assert_called_once_with(ctx, 2, 'list') + client.call.assert_called_once_with(ctx, 2, 'list', kwargs={}) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with() @@ -1301,8 +1328,8 @@ class TestCreate(test.NoDBTestCase): # the call to glanceclient's update (since the image ID is # supplied as a positional arg), and that the # purge_props default is True. - client.call.assert_called_once_with(ctx, 2, 'create', - name=mock.sentinel.name) + client.call.assert_called_once_with( + ctx, 2, 'create', kwargs=dict(name=mock.sentinel.name)) trans_from_mock.assert_called_once_with({'id': '123'}) self.assertEqual(mock.sentinel.trans_from, image_meta) @@ -1338,7 +1365,7 @@ class TestCreate(test.NoDBTestCase): image_meta = service.create(ctx, image_mock) trans_to_mock.assert_called_once_with(image_mock) # Verify that the disk_format and container_format kwargs are passed. - create_call_kwargs = client.call.call_args_list[0][1] + create_call_kwargs = client.call.call_args_list[0][1]['kwargs'] self.assertEqual('vdi', create_call_kwargs['disk_format']) self.assertEqual('bare', create_call_kwargs['container_format']) trans_from_mock.assert_called_once_with({'id': '123'}) @@ -1395,7 +1422,7 @@ class TestCreate(test.NoDBTestCase): mock.sentinel.ctx) self.assertEqual(expected_disk_format, disk_format) mock_client.call.assert_called_once_with( - mock.sentinel.ctx, 2, 'get', 'image', controller='schemas') + mock.sentinel.ctx, 2, 'get', args=('image',), controller='schemas') def test_get_image_create_disk_format_default_no_schema(self): """Tests that if there is no disk_format schema we default to qcow2. @@ -1486,11 +1513,11 @@ class TestUpdate(test.NoDBTestCase): # the call to glanceclient's update (since the image ID is # supplied as a positional arg), and that the # purge_props default is True. 
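
The assertion rewrites in this hunk, like those above, all track one signature change: GlanceClientWrapper.call() now takes the proxied method's positional arguments in an explicit args tuple and its optional parameters in a kwargs dict, instead of splatting them onto call() itself. A minimal sketch of the convention the updated assertions encode (get_controller is a hypothetical helper standing in for the wrapper's real client plumbing, which may differ):

    def call(context, version, method, controller=None, args=(), kwargs=None):
        # Keep positional and keyword arguments in explicit containers so
        # they can be forwarded to the glanceclient controller method
        # unambiguously (and replayed on retry).
        kwargs = kwargs or {}
        api = get_controller(context, version, controller or 'images')
        return getattr(api, method)(*args, **kwargs)

Under this convention, client.call(ctx, 2, 'get', args=(image_id,)) replaces the old client.call(ctx, 2, 'get', image_id), which is exactly the shape the updated assertions pin down.
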
- client.call.assert_called_once_with(ctx, 2, 'update', - image_id=mock.sentinel.image_id, - name=mock.sentinel.name, - prop_to_keep='4', - remove_props=['prop_to_remove']) + client.call.assert_called_once_with( + ctx, 2, 'update', kwargs=dict( + image_id=mock.sentinel.image_id, name=mock.sentinel.name, + prop_to_keep='4', remove_props=['prop_to_remove'], + )) trans_from_mock.assert_called_once_with(mock.sentinel.image_meta) self.assertEqual(mock.sentinel.trans_from, image_meta) @@ -1562,11 +1589,13 @@ class TestUpdate(test.NoDBTestCase): self.assertRaises(exception.ImageNotAuthorized, service.update, ctx, mock.sentinel.image_id, image) - client.call.assert_called_once_with(ctx, 2, 'update', - image_id=mock.sentinel.image_id, - name=mock.sentinel.name, - prop_to_keep='4', - remove_props=['prop_to_remove']) + client.call.assert_called_once_with( + ctx, 2, 'update', kwargs=dict( + image_id=mock.sentinel.image_id, + name=mock.sentinel.name, + prop_to_keep='4', + remove_props=['prop_to_remove'], + )) reraise_mock.assert_called_once_with(mock.sentinel.image_id) @@ -1580,8 +1609,8 @@ class TestDelete(test.NoDBTestCase): ctx = mock.sentinel.ctx service = glance.GlanceImageServiceV2(client) service.delete(ctx, mock.sentinel.image_id) - client.call.assert_called_once_with(ctx, 2, 'delete', - mock.sentinel.image_id) + client.call.assert_called_once_with( + ctx, 2, 'delete', args=(mock.sentinel.image_id,)) def test_delete_client_failure_v2(self): client = mock.MagicMock() @@ -1710,10 +1739,12 @@ class TestExtractQueryParams(test.NoDBTestCase): 'kernel-id': 'some-id', 'updated_at': 'gte:some-date'} - client.call.assert_called_once_with(ctx, 2, 'list', - filters=expected_filters_v1, - page_size=5, - limit=10) + client.call.assert_called_once_with( + ctx, 2, 'list', kwargs=dict( + filters=expected_filters_v1, + page_size=5, + limit=10, + )) class TestTranslateToGlance(test.NoDBTestCase): diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py index 79150082f5..aa57a77c71 100644 --- a/nova/tests/unit/objects/test_request_spec.py +++ b/nova/tests/unit/objects/test_request_spec.py @@ -504,7 +504,8 @@ class _TestRequestSpecObject(object): fake_spec['instance_uuid']) self.assertEqual(1, req_obj.num_instances) - self.assertEqual(['host2', 'host4'], req_obj.ignore_hosts) + # ignore_hosts is not persisted + self.assertIsNone(req_obj.ignore_hosts) self.assertEqual('fake', req_obj.project_id) self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints) self.assertEqual(['host1', 'host3'], req_obj.force_hosts) @@ -527,7 +528,7 @@ class _TestRequestSpecObject(object): jsonutils.loads(changes['spec'])) # primitive fields - for field in ['instance_uuid', 'num_instances', 'ignore_hosts', + for field in ['instance_uuid', 'num_instances', 'project_id', 'scheduler_hints', 'force_hosts', 'availability_zone', 'force_nodes']: self.assertEqual(getattr(req_obj, field), @@ -544,6 +545,7 @@ class _TestRequestSpecObject(object): self.assertIsNone(serialized_obj.instance_group.hosts) self.assertIsNone(serialized_obj.retry) self.assertIsNone(serialized_obj.requested_destination) + self.assertIsNone(serialized_obj.ignore_hosts) def test_create(self): req_obj = fake_request_spec.fake_spec_obj(remove_id=True) @@ -564,6 +566,39 @@ class _TestRequestSpecObject(object): self.assertRaises(exception.ObjectActionError, req_obj.create) + def test_save_does_not_persist_requested_fields(self): + req_obj = fake_request_spec.fake_spec_obj(remove_id=True) + req_obj.create() + # change 
something to make sure _save_in_db is called + expected_destination = request_spec.Destination(host='sample-host') + req_obj.requested_destination = expected_destination + expected_retry = objects.SchedulerRetries( + num_attempts=2, + hosts=objects.ComputeNodeList(objects=[ + objects.ComputeNode(host='host1', hypervisor_hostname='node1'), + objects.ComputeNode(host='host2', hypervisor_hostname='node2'), + ])) + req_obj.retry = expected_retry + req_obj.ignore_hosts = [uuids.ignored_host] + + orig_save_in_db = request_spec.RequestSpec._save_in_db + with mock.patch.object(request_spec.RequestSpec, '_save_in_db') \ + as mock_save_in_db: + mock_save_in_db.side_effect = orig_save_in_db + req_obj.save() + mock_save_in_db.assert_called_once() + updates = mock_save_in_db.mock_calls[0][1][2] + # assert that the following fields are not stored in the db + # 1. ignore_hosts + data = jsonutils.loads(updates['spec'])['nova_object.data'] + self.assertIsNone(data['ignore_hosts']) + self.assertIsNotNone(data['instance_uuid']) + + # also we expect that the following fields are not reset after save + # 1. ignore_hosts + self.assertIsNotNone(req_obj.ignore_hosts) + self.assertEqual([uuids.ignored_host], req_obj.ignore_hosts) + def test_save(self): req_obj = fake_request_spec.fake_spec_obj() # Make sure the requested_destination is not persisted since it is diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 9954c00e5b..d4512ee3f0 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -6954,6 +6954,66 @@ class LibvirtConnTestCase(test.NoDBTestCase, _set_cache_mode.assert_called_once_with(config) self.assertEqual(config_guest_disk.to_xml(), config.to_xml()) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_driver') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_attach_encryptor') + def test_connect_volume_encryption_success( + self, mock_attach_encryptor, mock_get_volume_driver): + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + mock_volume_driver = mock.MagicMock( + spec=volume_drivers.LibvirtBaseVolumeDriver) + mock_get_volume_driver.return_value = mock_volume_driver + + connection_info = {'driver_volume_type': 'fake', + 'data': {'device_path': '/fake', + 'access_mode': 'rw', + 'volume_id': uuids.volume_id}} + encryption = {'provider': encryptors.LUKS, + 'encryption_key_id': uuids.encryption_key_id} + instance = mock.sentinel.instance + + drvr._connect_volume(self.context, connection_info, instance, + encryption=encryption) + + mock_get_volume_driver.assert_called_once_with(connection_info) + mock_volume_driver.connect_volume.assert_called_once_with( + connection_info, instance) + mock_attach_encryptor.assert_called_once_with( + self.context, connection_info, encryption, True) + mock_volume_driver.disconnect_volume.assert_not_called() + + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_driver') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_attach_encryptor') + def test_connect_volume_encryption_fail( + self, mock_attach_encryptor, mock_get_volume_driver): + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + mock_volume_driver = mock.MagicMock( + spec=volume_drivers.LibvirtBaseVolumeDriver) + mock_get_volume_driver.return_value = mock_volume_driver + + connection_info = {'driver_volume_type': 'fake', + 'data': {'device_path': '/fake', + 'access_mode': 'rw', + 'volume_id': uuids.volume_id}} + encryption = {'provider': 
encryptors.LUKS, + 'encryption_key_id': uuids.encryption_key_id} + instance = mock.sentinel.instance + mock_attach_encryptor.side_effect = processutils.ProcessExecutionError + + self.assertRaises(processutils.ProcessExecutionError, + drvr._connect_volume, + self.context, connection_info, instance, + encryption=encryption) + + mock_get_volume_driver.assert_called_once_with(connection_info) + mock_volume_driver.connect_volume.assert_called_once_with( + connection_info, instance) + mock_attach_encryptor.assert_called_once_with( + self.context, connection_info, encryption, True) + mock_volume_driver.disconnect_volume.assert_called_once_with( + connection_info, instance) + @mock.patch.object(key_manager, 'API') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption') @mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks') @@ -7221,11 +7281,13 @@ class LibvirtConnTestCase(test.NoDBTestCase, mock.patch.object(drvr._host, 'get_guest'), mock.patch('nova.virt.libvirt.driver.LOG'), mock.patch.object(drvr, '_connect_volume'), + mock.patch.object(drvr, '_disconnect_volume'), mock.patch.object(drvr, '_get_volume_config'), mock.patch.object(drvr, '_check_discard_for_attach_volume'), mock.patch.object(drvr, '_build_device_metadata'), ) as (mock_get_guest, mock_log, mock_connect_volume, - mock_get_volume_config, mock_check_discard, mock_build_metadata): + mock_disconnect_volume, mock_get_volume_config, + mock_check_discard, mock_build_metadata): mock_conf = mock.MagicMock() mock_guest = mock.MagicMock() @@ -7239,6 +7301,50 @@ class LibvirtConnTestCase(test.NoDBTestCase, self.context, connection_info, instance, "/dev/vdb", disk_bus=bdm['disk_bus'], device_type=bdm['device_type']) mock_log.warning.assert_called_once() + mock_disconnect_volume.assert_called_once() + + @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') + def test_attach_volume_with_libvirt_exception(self, mock_get_info): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + connection_info = {"driver_volume_type": "fake", + "data": {"device_path": "/fake", + "access_mode": "rw"}} + bdm = {'device_name': 'vdb', + 'disk_bus': 'fake-bus', + 'device_type': 'fake-type'} + disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'], + 'dev': 'vdb'} + libvirt_exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError, + "Target vdb already exists', device is busy", + error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) + + with test.nested( + mock.patch.object(drvr._host, 'get_guest'), + mock.patch('nova.virt.libvirt.driver.LOG'), + mock.patch.object(drvr, '_connect_volume'), + mock.patch.object(drvr, '_disconnect_volume'), + mock.patch.object(drvr, '_get_volume_config'), + mock.patch.object(drvr, '_check_discard_for_attach_volume'), + mock.patch.object(drvr, '_build_device_metadata'), + ) as (mock_get_guest, mock_log, mock_connect_volume, + mock_disconnect_volume, mock_get_volume_config, + mock_check_discard, mock_build_metadata): + + mock_conf = mock.MagicMock() + mock_guest = mock.MagicMock() + mock_guest.attach_device.side_effect = libvirt_exc + mock_get_volume_config.return_value = mock_conf + mock_get_guest.return_value = mock_guest + mock_get_info.return_value = disk_info + mock_build_metadata.return_value = objects.InstanceDeviceMetadata() + + self.assertRaises(fakelibvirt.libvirtError, drvr.attach_volume, + self.context, connection_info, instance, "/dev/vdb", + disk_bus=bdm['disk_bus'], device_type=bdm['device_type']) + 
mock_log.exception.assert_called_once_with(u'Failed to attach ' + 'volume at mountpoint: %s', '/dev/vdb', instance=instance) + mock_disconnect_volume.assert_called_once() @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py index c88e022a81..7c61334a5a 100644 --- a/nova/tests/unit/virt/libvirt/test_guest.py +++ b/nova/tests/unit/virt/libvirt/test_guest.py @@ -324,6 +324,28 @@ class GuestTestCase(test.NoDBTestCase): # Some time later, we can do the wait/retry to ensure detach self.assertRaises(exception.DeviceNotFound, retry_detach) + @mock.patch.object(libvirt_guest.Guest, "detach_device") + def test_detach_device_with_retry_operation_internal(self, mock_detach): + # This simulates a retry of the transient/live domain detach + # failing because the device is not found + conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) + conf.to_xml.return_value = "</xml>" + self.domain.isPersistent.return_value = True + + get_config = mock.Mock(return_value=conf) + fake_device = "vdb" + fake_exc = fakelibvirt.make_libvirtError( + fakelibvirt.libvirtError, "", + error_message="operation failed: disk vdb not found", + error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR, + error_domain=fakelibvirt.VIR_FROM_DOMAIN) + mock_detach.side_effect = [None, fake_exc] + retry_detach = self.guest.detach_device_with_retry( + get_config, fake_device, live=True, + inc_sleep_time=.01, max_retry_count=3) + # Some time later, we can do the wait/retry to ensure detach + self.assertRaises(exception.DeviceNotFound, retry_detach) + def test_detach_device_with_retry_invalid_argument(self): # This simulates a persistent domain detach failing because # the device is not found diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py index 6999a0345f..4971758617 100644 --- a/nova/tests/unit/virt/libvirt/test_imagebackend.py +++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py @@ -21,6 +21,7 @@ import shutil import tempfile from castellan import key_manager +import ddt import fixtures import mock from oslo_concurrency import lockutils @@ -57,6 +58,7 @@ class FakeConn(object): return FakeSecret() +@ddt.ddt class _ImageTestCase(object): def mock_create_image(self, image): @@ -203,6 +205,24 @@ class _ImageTestCase(object): self.assertEqual(2361393152, image.get_disk_size(image.path)) get_disk_size.assert_called_once_with(image.path) + def _test_libvirt_info_scsi_with_unit(self, disk_unit): + # The address should be set if bus is scsi and unit is set. + # Otherwise, it should not be set at all. 
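
Together with the volume-driver test later in this patch, this helper pins down the address-tag contract described in the NOTE(melwitt) comments further below: an explicit drive address is emitted only when a unit number is actually requested. A condensed sketch of that contract, assuming the real config class from nova.virt.libvirt.config (the helper name itself is illustrative; the production logic lives in Image.disk_scsi() and LibvirtBaseVolumeDriver.get_config()):

    from nova.virt.libvirt import config as vconfig

    def build_disk_address(disk_info):
        # Pin controller 0 and the requested unit so a single virtio-scsi
        # controller can address up to 256 devices.
        if disk_info['bus'] == 'scsi' and 'unit' in disk_info:
            addr = vconfig.LibvirtConfigGuestDeviceAddressDrive()
            addr.controller = 0
            addr.unit = disk_info['unit']
            return addr
        # No unit requested: emit no address element at all and let
        # libvirt autogenerate one when the device is defined.
        return None
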
+ image = self.image_class(self.INSTANCE, self.NAME) + disk = image.libvirt_info(disk_bus='scsi', disk_dev='/dev/sda', + device_type='disk', cache_mode='none', + extra_specs={}, hypervisor_version=4004001, + disk_unit=disk_unit) + if disk_unit: + self.assertEqual(0, disk.device_addr.controller) + self.assertEqual(disk_unit, disk.device_addr.unit) + else: + self.assertIsNone(disk.device_addr) + + @ddt.data(5, None) + def test_libvirt_info_scsi_with_unit(self, disk_unit): + self._test_libvirt_info_scsi_with_unit(disk_unit) + class FlatTestCase(_ImageTestCase, test.NoDBTestCase): @@ -1276,6 +1296,7 @@ class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase): model) +@ddt.ddt class RbdTestCase(_ImageTestCase, test.NoDBTestCase): FSID = "FakeFsID" POOL = "FakePool" @@ -1500,6 +1521,17 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase): super(RbdTestCase, self).test_libvirt_info() + @ddt.data(5, None) + @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs") + def test_libvirt_info_scsi_with_unit(self, disk_unit, mock_mon_addrs): + def get_mon_addrs(): + hosts = ["server1", "server2"] + ports = ["1899", "1920"] + return hosts, ports + mock_mon_addrs.side_effect = get_mon_addrs + + super(RbdTestCase, self)._test_libvirt_info_scsi_with_unit(disk_unit) + @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs") def test_get_model(self, mock_mon_addrs): pool = "FakePool" diff --git a/nova/tests/unit/virt/libvirt/volume/test_volume.py b/nova/tests/unit/virt/libvirt/volume/test_volume.py index c6bb8b93b2..5733e00412 100644 --- a/nova/tests/unit/virt/libvirt/volume/test_volume.py +++ b/nova/tests/unit/virt/libvirt/volume/test_volume.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import ddt import mock from nova import exception @@ -130,6 +131,7 @@ class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase): return ret +@ddt.ddt class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase): def _assertDiskInfoEquals(self, tree, disk_info): @@ -373,3 +375,21 @@ class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase): conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertIsNone(tree.find("encryption")) + + @ddt.data(5, None) + def test_libvirt_volume_driver_address_tag_scsi_unit(self, disk_unit): + # The address tag should be set if bus is scsi and unit is set. + # Otherwise, it should not be set at all. + libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host) + connection_info = {'data': {'device_path': '/foo'}} + disk_info = {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'} + if disk_unit: + disk_info['unit'] = disk_unit + conf = libvirt_driver.get_config(connection_info, disk_info) + tree = conf.format_dom() + address = tree.find('address') + if disk_unit: + self.assertEqual('0', address.attrib['controller']) + self.assertEqual(str(disk_unit), address.attrib['unit']) + else: + self.assertIsNone(address) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index e20d11f08c..5f0e16e418 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -544,7 +544,11 @@ class FakeDriver(driver.ComputeDriver): return def confirm_migration(self, context, migration, instance, network_info): - return + # Confirm migration cleans up the guest from the source host so just + # destroy the guest to remove it from the list of tracked instances + # unless it is a same-host resize. 
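
A brief usage sketch of the new fake-driver behavior, assuming the usual FakeDriver bookkeeping of tracked guests in an instances dict keyed by instance uuid (the assertions are illustrative only, not part of the patch):

    # Same-host resize: source == dest, so the tracked guest must survive
    # confirm; destroying it here would lose the only guest there is.
    migration = objects.Migration(source_compute='host1',
                                  dest_compute='host1')
    driver.confirm_migration(ctx, migration, instance, network_info=None)
    assert instance.uuid in driver.instances

    # Cross-host migration: confirm cleans up the source host, so the
    # guest drops out of the driver's tracked instances.
    migration.dest_compute = 'host2'
    driver.confirm_migration(ctx, migration, instance, network_info=None)
    assert instance.uuid not in driver.instances
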
+ if migration.source_compute != migration.dest_compute: + self.destroy(context, instance, network_info) def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 7762c787bd..44cdc7d53a 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1251,8 +1251,15 @@ class LibvirtDriver(driver.ComputeDriver): encryption=None, allow_native_luks=True): vol_driver = self._get_volume_driver(connection_info) vol_driver.connect_volume(connection_info, instance) - self._attach_encryptor(context, connection_info, encryption, - allow_native_luks) + try: + self._attach_encryptor( + context, connection_info, encryption, allow_native_luks) + except Exception: + # Encryption failed so rollback the volume connection. + with excutils.save_and_reraise_exception(logger=LOG): + LOG.exception("Failure attaching encryptor; rolling back " + "volume connection", instance=instance) + vol_driver.disconnect_volume(connection_info, instance) def _should_disconnect_target(self, context, connection_info, instance): connection_count = 0 @@ -1483,11 +1490,17 @@ class LibvirtDriver(driver.ComputeDriver): # distributions provide Libvirt 3.3.0 or earlier with # https://libvirt.org/git/?p=libvirt.git;a=commit;h=7189099 applied. except libvirt.libvirtError as ex: - if 'Incorrect number of padding bytes' in six.text_type(ex): - LOG.warning(_('Failed to attach encrypted volume due to a ' - 'known Libvirt issue, see the following bug for details: ' - 'https://bugzilla.redhat.com/show_bug.cgi?id=1447297')) - raise + with excutils.save_and_reraise_exception(): + if 'Incorrect number of padding bytes' in six.text_type(ex): + LOG.warning(_('Failed to attach encrypted volume due to a ' + 'known Libvirt issue, see the following bug ' + 'for details: ' + 'https://bugzilla.redhat.com/1447297')) + else: + LOG.exception(_('Failed to attach volume at mountpoint: ' + '%s'), mountpoint, instance=instance) + self._disconnect_volume(context, connection_info, instance, + encryption=encryption) except Exception: LOG.exception(_('Failed to attach volume at mountpoint: %s'), mountpoint, instance=instance) diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py index e373fbade0..40fc213ecf 100644 --- a/nova/virt/libvirt/guest.py +++ b/nova/virt/libvirt/guest.py @@ -404,7 +404,8 @@ class Guest(object): except libvirt.libvirtError as ex: with excutils.save_and_reraise_exception(): errcode = ex.get_error_code() - if errcode == libvirt.VIR_ERR_OPERATION_FAILED: + if errcode in (libvirt.VIR_ERR_OPERATION_FAILED, + libvirt.VIR_ERR_INTERNAL_ERROR): errmsg = ex.get_error_message() if 'not found' in errmsg: # This will be raised if the live domain diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 79edbf1842..f260296d2e 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -180,11 +180,17 @@ class Image(object): return info def disk_scsi(self, info, disk_unit): - # The driver is responsible to create the SCSI controller - # at index 0. - info.device_addr = vconfig.LibvirtConfigGuestDeviceAddressDrive() - info.device_addr.controller = 0 + # NOTE(melwitt): We set the device address unit number manually in the + # case of the virtio-scsi controller, in order to allow attachment of + # up to 256 devices. So, we should only be setting the address tag + # if we intend to set the unit number. 
Otherwise, we will let libvirt + # handle autogeneration of the address tag. + # See https://bugs.launchpad.net/nova/+bug/1792077 for details. if disk_unit is not None: + # The driver is responsible to create the SCSI controller + # at index 0. + info.device_addr = vconfig.LibvirtConfigGuestDeviceAddressDrive() + info.device_addr.controller = 0 # In order to allow up to 256 disks handled by one # virtio-scsi controller, the device addr should be # specified. diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py index 48145a9d3b..5e7bddaa1c 100644 --- a/nova/virt/libvirt/volume/volume.py +++ b/nova/virt/libvirt/volume/volume.py @@ -94,16 +94,21 @@ class LibvirtBaseVolumeDriver(object): if data.get('discard', False) is True: conf.driver_discard = 'unmap' - if disk_info['bus'] == 'scsi': + # NOTE(melwitt): We set the device address unit number manually in the + # case of the virtio-scsi controller, in order to allow attachment of + # up to 256 devices. So, we should only be setting the address tag + # if we intend to set the unit number. Otherwise, we will let libvirt + # handle autogeneration of the address tag. + # See https://bugs.launchpad.net/nova/+bug/1792077 for details. + if disk_info['bus'] == 'scsi' and 'unit' in disk_info: # The driver is responsible to create the SCSI controller # at index 0. conf.device_addr = vconfig.LibvirtConfigGuestDeviceAddressDrive() conf.device_addr.controller = 0 - if 'unit' in disk_info: - # In order to allow up to 256 disks handled by one - # virtio-scsi controller, the device addr should be - # specified. - conf.device_addr.unit = disk_info['unit'] + # In order to allow up to 256 disks handled by one + # virtio-scsi controller, the device addr should be + # specified. + conf.device_addr.unit = disk_info['unit'] if connection_info.get('multiattach', False): # Note that driver_cache should be disabled (none) when using diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 70e5d39dfd..9362b02d78 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -321,6 +321,9 @@ def _untranslate_volume_summary_view(context, vol): d['shared_targets'] = vol.shared_targets d['service_uuid'] = vol.service_uuid + if hasattr(vol, 'migration_status'): + d['migration_status'] = vol.migration_status + return d diff --git a/playbooks/legacy/nova-cells-v1/run.yaml b/playbooks/legacy/nova-cells-v1/run.yaml index 24c8c8f90d..c5f3e98daf 100644 --- a/playbooks/legacy/nova-cells-v1/run.yaml +++ b/playbooks/legacy/nova-cells-v1/run.yaml @@ -13,12 +13,12 @@ set -x cat > clonemap.yaml << EOF clonemap: - - name: openstack-infra/devstack-gate + - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - https://git.openstack.org \ - openstack-infra/devstack-gate + https://opendev.org \ + openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' diff --git a/playbooks/legacy/nova-grenade-live-migration/post.yaml b/playbooks/legacy/nova-grenade-live-migration/post.yaml new file mode 100644 index 0000000000..e07f5510ae --- /dev/null +++ b/playbooks/legacy/nova-grenade-live-migration/post.yaml @@ -0,0 +1,15 @@ +- hosts: primary + tasks: + + - name: Copy files from {{ ansible_user_dir }}/workspace/ on node + synchronize: + src: '{{ ansible_user_dir }}/workspace/' + dest: '{{ zuul.executor.log_root }}' + mode: pull + copy_links: true + verify_host: true + rsync_opts: + - --include=/logs/** + - 
--include=*/ + - --exclude=* + - --prune-empty-dirs diff --git a/playbooks/legacy/nova-grenade-live-migration/run.yaml b/playbooks/legacy/nova-grenade-live-migration/run.yaml new file mode 100644 index 0000000000..7c476f117d --- /dev/null +++ b/playbooks/legacy/nova-grenade-live-migration/run.yaml @@ -0,0 +1,58 @@ +- hosts: primary + name: nova-grenade-live-migration + tasks: + + - name: Ensure legacy workspace directory + file: + path: '{{ ansible_user_dir }}/workspace' + state: directory + + - shell: + cmd: | + set -e + set -x + cat > clonemap.yaml << EOF + clonemap: + - name: openstack/devstack-gate + dest: devstack-gate + EOF + /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ + https://opendev.org \ + openstack/devstack-gate + executable: /bin/bash + chdir: '{{ ansible_user_dir }}/workspace' + environment: '{{ zuul | zuul_legacy_vars }}' + + - shell: + cmd: | + set -e + set -x + export PROJECTS="openstack/grenade $PROJECTS" + export PYTHONUNBUFFERED=true + export DEVSTACK_GATE_CONFIGDRIVE=0 + export DEVSTACK_GATE_NEUTRON=1 + export DEVSTACK_GATE_TEMPEST_NOTESTS=1 + export DEVSTACK_GATE_GRENADE=pullup + # By default grenade runs only smoke tests so we need to set + # RUN_SMOKE to False in order to run live migration tests using + # grenade + export DEVSTACK_LOCAL_CONFIG="RUN_SMOKE=False" + # LIVE_MIGRATE_BACK_AND_FORTH will tell Tempest to run a live + # migration of the same instance to one compute node and then back + # to the other, which is mostly only interesting for grenade since + # we have mixed level computes. + export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIVE_MIGRATE_BACK_AND_FORTH=True" + export BRANCH_OVERRIDE=default + export DEVSTACK_GATE_TOPOLOGY="multinode" + if [ "$BRANCH_OVERRIDE" != "default" ] ; then + export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE + fi + function post_test_hook { + /opt/stack/new/nova/nova/tests/live_migration/hooks/run_tests.sh + } + export -f post_test_hook + cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh + ./safe-devstack-vm-gate-wrap.sh + executable: /bin/bash + chdir: '{{ ansible_user_dir }}/workspace' + environment: '{{ zuul | zuul_legacy_vars }}' diff --git a/playbooks/legacy/nova-live-migration/run.yaml b/playbooks/legacy/nova-live-migration/run.yaml index a8bada0e17..64c7eb1e20 100644 --- a/playbooks/legacy/nova-live-migration/run.yaml +++ b/playbooks/legacy/nova-live-migration/run.yaml @@ -13,12 +13,12 @@ set -x cat > clonemap.yaml << EOF clonemap: - - name: openstack-infra/devstack-gate + - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - https://git.openstack.org \ - openstack-infra/devstack-gate + https://opendev.org \ + openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' diff --git a/playbooks/legacy/nova-lvm/run.yaml b/playbooks/legacy/nova-lvm/run.yaml index cea89f28a2..3d84ff5223 100644 --- a/playbooks/legacy/nova-lvm/run.yaml +++ b/playbooks/legacy/nova-lvm/run.yaml @@ -13,12 +13,12 @@ set -x cat > clonemap.yaml << EOF clonemap: - - name: openstack-infra/devstack-gate + - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - https://git.openstack.org \ - openstack-infra/devstack-gate + https://opendev.org \ + openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' diff --git 
a/playbooks/legacy/nova-multiattach/run.yaml b/playbooks/legacy/nova-multiattach/run.yaml index e48e58e04e..268ad3f665 100644 --- a/playbooks/legacy/nova-multiattach/run.yaml +++ b/playbooks/legacy/nova-multiattach/run.yaml @@ -13,12 +13,12 @@ set -x cat > clonemap.yaml << EOF clonemap: - - name: openstack-infra/devstack-gate + - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - https://git.openstack.org \ - openstack-infra/devstack-gate + https://opendev.org \ + openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' diff --git a/playbooks/legacy/nova-next/run.yaml b/playbooks/legacy/nova-next/run.yaml index ef2531b079..73195798d9 100644 --- a/playbooks/legacy/nova-next/run.yaml +++ b/playbooks/legacy/nova-next/run.yaml @@ -13,12 +13,12 @@ set -x cat > clonemap.yaml << EOF clonemap: - - name: openstack-infra/devstack-gate + - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - https://git.openstack.org \ - openstack-infra/devstack-gate + https://opendev.org \ + openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}'
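
One detail of the guest.py hunk above deserves spelling out: some libvirt versions report a missing device on the live domain as VIR_ERR_INTERNAL_ERROR rather than VIR_ERR_OPERATION_FAILED, so the detach retry path now treats both codes as a possible "already detached" signal. A condensed paraphrase of the check (the helper name is illustrative):

    import libvirt

    def device_not_found(ex):
        # e.g. "operation failed: disk vdb not found" may arrive under
        # either error code depending on the libvirt version; both now
        # mean the device is gone and DeviceNotFound should be raised.
        if ex.get_error_code() in (libvirt.VIR_ERR_OPERATION_FAILED,
                                   libvirt.VIR_ERR_INTERNAL_ERROR):
            return 'not found' in ex.get_error_message()
        return False
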